"""
UI Components for Universal MCP Client - Fixed with optimal MCP guidance
"""
import gradio as gr
from gradio import ChatMessage
from typing import Tuple, List, Dict, Any
import os
import json
import logging
import traceback
from openai import OpenAI

from config import AppConfig, CUSTOM_CSS
from chat_handler import ChatHandler
from server_manager import ServerManager
from mcp_client import UniversalMCPClient

logger = logging.getLogger(__name__)


class UIComponents:
    """Manages Gradio UI components with improved MCP server management"""

    def __init__(self, mcp_client: UniversalMCPClient):
        self.mcp_client = mcp_client
        self.chat_handler = ChatHandler(mcp_client)
        self.server_manager = ServerManager(mcp_client)
        self.current_user = None

    def _initialize_default_servers(self):
        """Initialize default MCP servers on app startup"""
        # Each entry is (display name, Hugging Face Space id)
        default_servers = [
            ("Nymbo-Tools", "Nymbo/Tools"),
            ("background removal", "ysharma/background-removal-mcp"),
        ]

        logger.info("🚀 Initializing default MCP servers...")

        for server_name, space_id in default_servers:
            try:
                status_msg, _ = self.server_manager.add_custom_server(server_name, space_id)
                if "✅" in status_msg:
                    logger.info(f"✅ Added default server: {server_name}")
                else:
                    logger.warning(f"⚠️ Failed to add default server {server_name}: {status_msg}")
            except Exception as e:
                logger.error(f"❌ Error adding default server {server_name}: {e}")

        logger.info(f"📊 Initialized {len(self.mcp_client.servers)} default servers")

    def create_interface(self) -> gr.Blocks:
        """Create the main Gradio interface with improved layout"""
        with gr.Blocks(
            title="Universal MCP Client - HF Inference Powered",
            theme="Nymbo/Nymbo_Theme",
            fill_height=True,
            css=CUSTOM_CSS
        ) as demo:
            # Sidebar: login, inference configuration, and server management
            self._create_sidebar()

            # Main column: chatbot plus multimodal input
            chatbot = self._create_main_chat_area()

            # Wire up all component callbacks
            self._setup_event_handlers(chatbot, demo)

        return demo

    def _create_sidebar(self):
        """Create the sidebar with login, provider/model selection, and server management"""
        with gr.Sidebar(elem_id="main-sidebar"):
            gr.Markdown("# 🤗 ChatMCP")

            self._create_api_key_section()

            self._create_provider_model_selection()

            self._create_server_management_section()

            with gr.Accordion("📚 Guide & Info", open=False):
                gr.Markdown("""
                ## 🎯 How To Use
                1. **Add Your API Key**: Paste a valid Hugging Face Inference API token
                2. **Add MCP Servers**: Connect to various AI tools on the 🤗 Hub
                3. **Enable/Disable Servers**: Use the checkboxes to control which servers are active
                4. **Chat**: Interact with GPT-OSS and use the connected MCP servers

                ## 💭 Features
                - **GPT-OSS Models**: OpenAI's latest open-source reasoning models (128k context)
                - **MCP Integration**: Connect to thousands of AI apps on the Hub via the MCP protocol
                - **Multi-Provider**: Access via Cerebras, Fireworks, Together AI, and others
                - **Media Support**: Automatic embedding of images, audio, video, and other media
                """)

    def _create_api_key_section(self):
        """Create secret input section for Hugging Face API keys"""
        with gr.Group(elem_classes="login-section"):
            gr.Markdown("""
            **🔐 HF Token**
            """)
            self.api_key_box = gr.Textbox(
                label="HF API Token",
                placeholder="hf_...",
                type="password",
                value=os.getenv("HF_TOKEN", "")  # pre-fill from the environment when available
            )
            self.api_key_status = gr.Markdown("", visible=False, container=True)

    def _create_provider_model_selection(self):
        """Create provider and model selection dropdowns with defaults"""
        with gr.Accordion("🚀 Inference Configuration", open=False):
            provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
            self.provider_dropdown = gr.Dropdown(
                choices=provider_choices,
                label="🔧 Inference Provider",
                value="auto",
                info="Choose your preferred inference provider"
            )

            self.model_dropdown = gr.Dropdown(
                choices=[],
                label="🤖 Model",
                value=None,
                info="Select model"
            )

            self.api_status = gr.Markdown("⚪ Select provider and model to begin", container=True)

            with gr.Row():
                self.temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.3, step=0.01, label="Temperature")
                self.top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Top-p")
            with gr.Row():
                self.max_tokens_box = gr.Number(value=8192, precision=0, label="Max tokens")
                self.seed_box = gr.Number(value=None, precision=0, label="Seed (-1 for random)")
            with gr.Row():
                self.frequency_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Frequency penalty")
                self.presence_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Presence penalty")
            # Raw string so the placeholder shows a literal "\n\n" rather than real newlines
            self.stop_sequences = gr.Textbox(label="Stop sequences (comma-separated)", placeholder=r"e.g. \n\n, User:")

            with gr.Group(visible=True) as self.reasoning_group:
                self.reasoning_effort = gr.Radio(
                    choices=["low", "medium", "high"],
                    value=AppConfig.DEFAULT_REASONING_EFFORT,
                    label="Reasoning effort (GPT-OSS)"
                )

            with gr.Row():
                self.response_format = gr.Dropdown(choices=["text", "json_object", "json_schema"], value="text", label="Response format")
            with gr.Group(visible=False) as self.json_schema_group:
                self.json_schema_name = gr.Textbox(label="JSON schema name", placeholder="my_schema")
                self.json_schema_description = gr.Textbox(label="JSON schema description", placeholder="Describe the expected JSON")
                self.json_schema_editor = gr.Textbox(label="JSON schema (object)", lines=8, placeholder='{"type":"object","properties":{...},"required":[...]}')
                self.json_schema_strict = gr.Checkbox(value=False, label="Strict schema adherence")

            with gr.Row():
                self.tool_choice = gr.Dropdown(choices=["auto", "none", "required", "function"], value="auto", label="Tool choice")
                self.tool_function_name = gr.Textbox(label="Function name (when tool_choice=function)")
            self.tool_prompt = gr.Textbox(label="Tool prompt", placeholder="Optional prompt appended before the tools")
            self.tools_json = gr.Textbox(label="Tools (JSON array)", lines=8, placeholder='[{"type":"function","function":{"name":"fn","description":"...","parameters":{}}}]')

            def _on_response_format_change(fmt):
                # Show the schema editor only when "json_schema" is selected
                return gr.Group(visible=(fmt == "json_schema"))
            self.response_format.change(_on_response_format_change, inputs=[self.response_format], outputs=[self.json_schema_group])

            def update_generation_params(
                temperature, top_p, max_tokens, seed,
                frequency_penalty, presence_penalty,
                stop_sequences, reasoning_effort, response_format,
                json_schema_name, json_schema_description, json_schema_editor, json_schema_strict,
                tool_choice, tool_function_name, tool_prompt, tools_json
            ):
                params = {
                    "temperature": float(temperature) if temperature is not None else None,
                    "top_p": float(top_p) if top_p is not None else None,
                    "max_tokens": int(max_tokens) if max_tokens else None,
                    # -1 (or an empty box) means "random seed", signalled with None
                    "seed": (None if (seed in (-1, "-1")) else (int(seed) if seed not in (None, "") else None)),
                    "frequency_penalty": float(frequency_penalty) if frequency_penalty is not None else None,
                    "presence_penalty": float(presence_penalty) if presence_penalty is not None else None,
                    # Split the comma-separated textbox into a list of stop strings
                    "stop": [s.strip() for s in stop_sequences.split(",") if s.strip()] if stop_sequences else None,
                }
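
                # With the UI defaults this resolves to roughly (illustrative,
                # not emitted by the app):
                # {"temperature": 0.3, "top_p": 1.0, "max_tokens": 8192,
                #  "seed": None, "frequency_penalty": 0.0,
                #  "presence_penalty": 0.0, "stop": None}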

                # Apply reasoning effort only for GPT-OSS models; other models
                # do not accept the parameter
                try:
                    current_model = self.mcp_client.current_model
                    if current_model and AppConfig.is_gpt_oss_model(current_model):
                        params["reasoning_effort"] = reasoning_effort
                except Exception:
                    pass

                # Structured output: plain JSON mode or a user-supplied JSON schema
                if response_format == "json_object":
                    params["response_format"] = {"type": "json_object"}
                elif response_format == "json_schema":
                    try:
                        schema_obj = json.loads(json_schema_editor) if json_schema_editor else {}
                    except Exception as e:
                        return gr.Markdown(f"❌ Invalid JSON schema: {e}", visible=True)
                    json_fmt = {
                        "type": "json_schema",
                        "json_schema": {
                            "name": json_schema_name or "schema",
                            "schema": schema_obj,
                        },
                    }
                    if json_schema_description:
                        json_fmt["json_schema"]["description"] = json_schema_description
                    if json_schema_strict:
                        json_fmt["json_schema"]["strict"] = True
                    params["response_format"] = json_fmt
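
                # Example of the resulting response_format value for a
                # hypothetical schema (for illustration only):
                # {"type": "json_schema",
                #  "json_schema": {"name": "weather_report",
                #                  "schema": {"type": "object",
                #                             "properties": {"city": {"type": "string"}},
                #                             "required": ["city"]},
                #                  "strict": True}}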

                # Tools must be a JSON array of OpenAI-style function specs
                tools = None
                if tools_json and tools_json.strip():
                    try:
                        parsed = json.loads(tools_json)
                        if isinstance(parsed, list):
                            tools = parsed
                        else:
                            return gr.Markdown("❌ Tools must be a JSON array.", visible=True)
                    except Exception as e:
                        return gr.Markdown(f"❌ Invalid tools JSON: {e}", visible=True)
                if tools is not None:
                    params["tools"] = tools
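
                # A minimal value the tools textbox accepts (hypothetical
                # function, for illustration only):
                # [{"type": "function",
                #   "function": {"name": "get_weather",
                #                "description": "Look up current weather",
                #                "parameters": {"type": "object",
                #                               "properties": {"city": {"type": "string"}},
                #                               "required": ["city"]}}}]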

                # tool_choice: simple modes pass through; "function" targets one tool by name
                if tool_choice in ("auto", "none", "required"):
                    params["tool_choice"] = tool_choice
                elif tool_choice == "function" and tool_function_name:
                    params["tool_choice"] = {"type": "function", "function": {"name": tool_function_name}}

                if tool_prompt and tool_prompt.strip():
                    params["tool_prompt"] = tool_prompt.strip()
                self.mcp_client.set_generation_params(params)
                # visible=True is required because the status component starts hidden
                return gr.Markdown("✅ Inference parameters updated.", visible=True)

            self.gen_param_status = gr.Markdown(visible=False)

            # Re-apply the generation parameters whenever any control changes
            for comp in [
                self.temperature_slider, self.top_p_slider,
                self.max_tokens_box, self.seed_box, self.frequency_penalty,
                self.presence_penalty,
                self.stop_sequences, self.reasoning_effort, self.response_format,
                self.json_schema_name, self.json_schema_description, self.json_schema_editor, self.json_schema_strict,
                self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
            ]:
                comp.change(
                    update_generation_params,
                    inputs=[
                        self.temperature_slider, self.top_p_slider,
                        self.max_tokens_box, self.seed_box, self.frequency_penalty,
                        self.presence_penalty,
                        self.stop_sequences, self.reasoning_effort, self.response_format,
                        self.json_schema_name, self.json_schema_description, self.json_schema_editor, self.json_schema_strict,
                        self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
                    ],
                    outputs=[self.gen_param_status]
                )

    def _create_server_management_section(self):
        """Create the server management section with checkboxes and guidance"""
        with gr.Group():
            gr.Markdown("## 🔧 MCP Servers", container=True)

            gr.Markdown("""
            <div style="background: #f0f8ff; padding: 10px; border-radius: 5px; border-left: 3px solid #4169e1; margin-bottom: 10px;">
            <strong>💡 Best Practice:</strong> For optimal performance, we recommend keeping
            <strong>3-6 MCP servers</strong> enabled at once. Too many servers can:<br>
            • Increase context usage (reducing available tokens for conversation)<br>
            • Potentially confuse the model when selecting tools<br>
            • Slow down response times<br><br>
            You can add more servers, but selectively enable only the ones you need for your current task.
            </div>
            """, container=True)

            with gr.Row():
                self.add_server_btn = gr.Button("Add MCP Server", variant="primary", size="sm")
                self.remove_all_btn = gr.Button("Remove All", variant="secondary", size="sm")

            self.save_server_btn = gr.Button("Save Server", variant="primary", size="sm", visible=False)

            from mcp_spaces_finder import _finder
            spaces = _finder.get_mcp_spaces()
            self.mcp_dropdown = gr.Dropdown(
                choices=spaces,
                label=f"Available MCP Servers ({len(spaces)})",
                value=None,
                info="Choose from Hugging Face Spaces",
                allow_custom_value=True,
                visible=False
            )

            self.server_name = gr.Textbox(
                label="Server Title",
                placeholder="e.g., Text to Image Generator",
                visible=False
            )

            self.server_checkboxes = gr.CheckboxGroup(
                label="Active Servers (Check to enable)",
                choices=[],
                value=[],
                info="✅ Enabled servers can be used | ⬜ Disabled servers are ignored"
            )

            self.add_server_output = gr.Markdown("", visible=False, container=True)

    def _create_main_chat_area(self) -> gr.Chatbot:
        """Create the main chat area"""
        with gr.Column(elem_classes="main-content"):
            chatbot = gr.Chatbot(
                label="Universal MCP-Powered AI Assistant",
                show_label=False,
                type="messages",
                scale=1,
                show_copy_button=True,
                avatar_images=None,
                value=[
                    ChatMessage(
                        role="assistant",
                        content="""Welcome! I'm your MCP-powered AI assistant using OpenAI's GPT-OSS models via HuggingFace Inference Providers.

🎉 **Pre-loaded MCP servers ready to use:**
- **Nymbo-Tools** - Web Fetch, Web Search, Code Interpreter, Memory, Deep Research, Speech/Image/Video Gen
- **background removal** - Remove backgrounds from images

You can start using these servers right away, add more servers, or remove them as needed. Try asking me to generate an image, create speech, or any other task!"""
                    )
                ]
            )

            with gr.Column(scale=0, elem_classes="input-area"):
                self.chat_input = gr.MultimodalTextbox(
                    interactive=True,
                    file_count="multiple",
                    placeholder="Enter message or upload files...",
                    show_label=False,
                    sources=["upload", "microphone"],
                    file_types=None
                )

        return chatbot

    def _setup_event_handlers(self, chatbot: gr.Chatbot, demo: gr.Blocks):
        """Set up all event handlers"""

        def handle_api_key_update(api_key: str):
            """Persist user-provided API key for the current session"""
            if not api_key:
                os.environ.pop("HF_TOKEN", None)
                AppConfig.HF_TOKEN = None
                self.mcp_client.hf_client = None
                return gr.Markdown("⚠️ API token cleared. Add a token to enable calls.", visible=True)

            token = api_key.strip()
            os.environ["HF_TOKEN"] = token
            AppConfig.HF_TOKEN = token

            try:
                self.mcp_client.hf_client = OpenAI(
                    base_url="https://router.huggingface.co/v1",
                    api_key=token
                )
                logger.info("✅ HuggingFace client configured from pasted token")
                return gr.Markdown("✅ API token saved for this session.", visible=True)
            except Exception as exc:
                logger.error(f"❌ Failed to configure HF client with provided token: {exc}")
                return gr.Markdown("❌ Invalid token. Please verify and try again.", visible=True)

        def initialize_api_key_status():
            token_present = bool(os.getenv("HF_TOKEN"))
            if token_present:
                return gr.Markdown("✅ API token detected from environment.", visible=True)
            return gr.Markdown("", visible=False)

        def handle_provider_change(provider_id):
            if not provider_id:
                return gr.Dropdown(choices=[], value=None), "⚪ Select provider first", gr.Group(visible=False)

            available_models = AppConfig.get_available_models_for_provider(provider_id)
            model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]

            # Prefer GPT-OSS 120B when the provider offers it
            default_model = "openai/gpt-oss-120b" if "openai/gpt-oss-120b" in available_models else (available_models[0] if available_models else None)

            if default_model:
                model_info = AppConfig.AVAILABLE_MODELS.get(default_model, {})
                context_length = model_info.get("context_length", 128000)
                status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
            else:
                status_msg = "✅ Provider selected, please select a model"

            show_reasoning = AppConfig.is_gpt_oss_model(default_model) if default_model else False
            return (
                gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
                status_msg,
                gr.Group(visible=show_reasoning)
            )

        def handle_model_change(provider_id, model_id):
            if not provider_id or not model_id:
                return "⚪ Select both provider and model", gr.Group(visible=False)

            self.mcp_client.set_model_and_provider(provider_id, model_id)

            model_info = AppConfig.AVAILABLE_MODELS.get(model_id, {})
            context_length = model_info.get("context_length", 128000)
            active_params = model_info.get("active_params", "N/A")

            if self.mcp_client.hf_client:
                status = f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
            else:
                status = "❌ Please add your Hugging Face API token"

            show_reasoning = AppConfig.is_gpt_oss_model(model_id)
            return status, gr.Group(visible=show_reasoning)

        def submit_message(message, history):
            if message and (message.get("text", "").strip() or message.get("files", [])):
                # Normalize plain dict history entries into ChatMessage objects
                converted_history = []
                for msg in history:
                    if isinstance(msg, dict):
                        converted_history.append(ChatMessage(
                            role=msg.get('role', 'assistant'),
                            content=msg.get('content', ''),
                            metadata=msg.get('metadata', None)
                        ))
                    else:
                        converted_history.append(msg)

                new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
                return new_history, cleared_input
            # Nothing to send: keep history and briefly disable the input
            # (re-enabled by the chained .then(enable_input) below)
            return history, gr.MultimodalTextbox(value=None, interactive=False)

        def enable_input():
            return gr.MultimodalTextbox(interactive=True)

        def show_add_server_fields():
            return [
                gr.Dropdown(visible=True),
                gr.Textbox(visible=True),
                gr.Button(interactive=False),
                gr.Button(visible=True)
            ]

        def hide_add_server_fields():
            return [
                gr.Dropdown(visible=False, value=None),
                gr.Textbox(visible=False, value=""),
                gr.Button(interactive=True),
                gr.Button(visible=False)
            ]

        def handle_add_server(server_title, selected_space):
            if not server_title or not selected_space:
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown("❌ Please provide both server title and space selection", visible=True)
                ]

            try:
                status_msg, _ = self.server_manager.add_custom_server(server_title.strip(), selected_space)

                server_choices = list(self.mcp_client.servers.keys())
                enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]

                warning_msg = ""
                if len(enabled_servers) > 6:
                    warning_msg = "\n\n⚠️ **Note:** You have more than 6 servers enabled. Consider disabling some for better performance."

                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=server_choices, value=enabled_servers),
                    gr.Markdown(status_msg + warning_msg, visible=True)
                ]

            except Exception as e:
                logger.error(f"Error adding server: {e}")
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),
                    gr.Button(visible=False),
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown(f"❌ Error: {str(e)}", visible=True)
                ]

        def handle_server_toggle(enabled_servers):
            """Handle enabling/disabling servers via checkboxes"""
            # Sync every server's enabled flag with the checkbox state
            for server_name in self.mcp_client.servers.keys():
                self.mcp_client.enable_server(server_name, server_name in enabled_servers)

            enabled_count = len(enabled_servers)

            if enabled_count == 0:
                message = "ℹ️ No servers enabled - chatbot will use native capabilities only"
            elif enabled_count <= 6:
                message = f"✅ {enabled_count} server{'s' if enabled_count != 1 else ''} enabled - optimal configuration"
            else:
                message = f"⚠️ {enabled_count} servers enabled - consider reducing to 3-6 for better performance"

            return gr.Markdown(message, visible=True)

        def handle_remove_all():
            """Remove all MCP servers"""
            count = self.mcp_client.remove_all_servers()
            return [
                gr.CheckboxGroup(choices=[], value=[]),
                gr.Markdown(f"✅ Removed all {count} servers", visible=True)
            ]

        def initialize_defaults():
            """Initialize default servers and update UI on app load"""
            self._initialize_default_servers()

            server_choices = list(self.mcp_client.servers.keys())
            enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]

            return gr.CheckboxGroup(
                choices=server_choices,
                value=enabled_servers,
                label=f"Active Servers ({len(server_choices)} loaded)"
            )

        # --- Event wiring ---
        self.api_key_box.input(
            handle_api_key_update,
            inputs=[self.api_key_box],
            outputs=[self.api_key_status]
        )

        demo.load(
            fn=initialize_api_key_status,
            outputs=[self.api_key_status]
        )

        # Pre-select the "auto" provider (and its default model) on load
        demo.load(
            fn=lambda: handle_provider_change("auto"),
            outputs=[self.model_dropdown, self.api_status, self.reasoning_group]
        )

        # Register the default servers and populate the checkbox list
        demo.load(
            fn=initialize_defaults,
            outputs=[self.server_checkboxes]
        )

        self.provider_dropdown.change(
            handle_provider_change,
            inputs=[self.provider_dropdown],
            outputs=[self.model_dropdown, self.api_status, self.reasoning_group]
        )

        self.model_dropdown.change(
            handle_model_change,
            inputs=[self.provider_dropdown, self.model_dropdown],
            outputs=[self.api_status, self.reasoning_group]
        )

        chat_submit = self.chat_input.submit(
            submit_message,
            inputs=[self.chat_input, chatbot],
            outputs=[chatbot, self.chat_input]
        )
        chat_submit.then(enable_input, None, [self.chat_input])

        self.add_server_btn.click(
            fn=show_add_server_fields,
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn]
        )

        self.save_server_btn.click(
            fn=handle_add_server,
            inputs=[self.server_name, self.mcp_dropdown],
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn, self.server_checkboxes, self.add_server_output]
        )

        self.server_checkboxes.change(
            handle_server_toggle,
            inputs=[self.server_checkboxes],
            outputs=[self.add_server_output]
        )

        self.remove_all_btn.click(
            handle_remove_all,
            outputs=[self.server_checkboxes, self.add_server_output]
        )
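

if __name__ == "__main__":
    # Minimal local-launch sketch. Assumption: UniversalMCPClient() takes no
    # required constructor arguments; check mcp_client.py for the actual
    # signature before relying on this.
    client = UniversalMCPClient()
    ui = UIComponents(client)
    demo = ui.create_interface()
    demo.launch()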