Nymbo committed on
Commit
8840c3f
·
verified ·
1 Parent(s): b944245

Update ui_components.py

Browse files
Files changed (1) hide show
  1. ui_components.py +32 -14
ui_components.py CHANGED
@@ -129,7 +129,7 @@ class UIComponents:
129
  choices=[],
130
  label="🤖 Model",
131
  value=None,
132
- info="Select GPT OSS model variant"
133
  )
134
 
135
  # Status display
@@ -147,8 +147,13 @@ class UIComponents:
147
  self.presence_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Presence penalty")
148
  self.stop_sequences = gr.Textbox(label="Stop sequences (comma-separated)", placeholder="e.g. \n\n, User:")
149
 
150
- # Reasoning effort (GPT-OSS)
151
- self.reasoning_effort = gr.Radio(choices=["low", "medium", "high"], value=AppConfig.DEFAULT_REASONING_EFFORT, label="Reasoning effort (GPT‑OSS)")
 
 
 
 
 
152
 
153
  # Response format controls
154
  with gr.Row():
@@ -187,10 +192,17 @@ class UIComponents:
187
  "presence_penalty": float(presence_penalty) if presence_penalty is not None else None,
188
  # stop: list[str]
189
  "stop": [s.strip() for s in stop_sequences.split(",") if s.strip()] if stop_sequences else None,
190
- # GPT-OSS specific control stored separately; mcp_client merges it
191
- "reasoning_effort": reasoning_effort,
192
  }
193
 
 
 
 
 
 
 
 
 
 
194
  # response_format
195
  if response_format == "json_object":
196
  params["response_format"] = {"type": "json_object"}
@@ -389,7 +401,7 @@ class UIComponents:
389
  # Provider selection with auto-model loading
390
  def handle_provider_change(provider_id):
391
  if not provider_id:
392
- return gr.Dropdown(choices=[], value=None), "⚪ Select provider first"
393
 
394
  available_models = AppConfig.get_available_models_for_provider(provider_id)
395
  model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]
@@ -404,16 +416,18 @@ class UIComponents:
404
  status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
405
  else:
406
  status_msg = "✅ Provider selected, please select a model"
407
-
 
408
  return (
409
  gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
410
- status_msg
 
411
  )
412
 
413
  # Model selection
414
  def handle_model_change(provider_id, model_id):
415
  if not provider_id or not model_id:
416
- return "⚪ Select both provider and model"
417
 
418
  self.mcp_client.set_model_and_provider(provider_id, model_id)
419
 
@@ -423,9 +437,13 @@ class UIComponents:
423
  active_params = model_info.get("active_params", "N/A")
424
 
425
  if self.mcp_client.hf_client:
426
- return f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
427
  else:
428
- return "❌ Please add your Hugging Face API token"
 
 
 
 
429
 
430
  # Chat handlers
431
  def submit_message(message, history):
@@ -564,7 +582,7 @@ class UIComponents:
564
  # Connect provider/model dropdowns with auto-selection on load
565
  demo.load(
566
  fn=lambda: handle_provider_change("cerebras"),
567
- outputs=[self.model_dropdown, self.api_status]
568
  )
569
 
570
  # Initialise default mcp server load
@@ -576,13 +594,13 @@ class UIComponents:
576
  self.provider_dropdown.change(
577
  handle_provider_change,
578
  inputs=[self.provider_dropdown],
579
- outputs=[self.model_dropdown, self.api_status]
580
  )
581
 
582
  self.model_dropdown.change(
583
  handle_model_change,
584
  inputs=[self.provider_dropdown, self.model_dropdown],
585
- outputs=[self.api_status]
586
  )
587
 
588
  # Connect chat
 
129
  choices=[],
130
  label="🤖 Model",
131
  value=None,
132
+ info="Select model"
133
  )
134
 
135
  # Status display
 
147
  self.presence_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Presence penalty")
148
  self.stop_sequences = gr.Textbox(label="Stop sequences (comma-separated)", placeholder="e.g. \n\n, User:")
149
 
150
+ # Reasoning effort (GPT-OSS only)
151
+ with gr.Group(visible=True) as self.reasoning_group:
152
+ self.reasoning_effort = gr.Radio(
153
+ choices=["low", "medium", "high"],
154
+ value=AppConfig.DEFAULT_REASONING_EFFORT,
155
+ label="Reasoning effort (GPT‑OSS)"
156
+ )
157
 
158
  # Response format controls
159
  with gr.Row():
 
192
  "presence_penalty": float(presence_penalty) if presence_penalty is not None else None,
193
  # stop: list[str]
194
  "stop": [s.strip() for s in stop_sequences.split(",") if s.strip()] if stop_sequences else None,
 
 
195
  }
196
 
197
+ # Only include reasoning_effort for GPT-OSS models
198
+ try:
199
+ current_model = self.mcp_client.current_model
200
+ if current_model and AppConfig.is_gpt_oss_model(current_model):
201
+ params["reasoning_effort"] = reasoning_effort
202
+ except Exception:
203
+ # If any issue, omit reasoning
204
+ pass
205
+
206
  # response_format
207
  if response_format == "json_object":
208
  params["response_format"] = {"type": "json_object"}
 
401
  # Provider selection with auto-model loading
402
  def handle_provider_change(provider_id):
403
  if not provider_id:
404
+ return gr.Dropdown(choices=[], value=None), "⚪ Select provider first", gr.Group(visible=False)
405
 
406
  available_models = AppConfig.get_available_models_for_provider(provider_id)
407
  model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]
 
416
  status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
417
  else:
418
  status_msg = "✅ Provider selected, please select a model"
419
+ # Reasoning UI visibility based on whether model is GPT-OSS
420
+ show_reasoning = AppConfig.is_gpt_oss_model(default_model) if default_model else False
421
  return (
422
  gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
423
+ status_msg,
424
+ gr.Group(visible=show_reasoning)
425
  )
426
 
427
  # Model selection
428
  def handle_model_change(provider_id, model_id):
429
  if not provider_id or not model_id:
430
+ return "⚪ Select both provider and model", gr.Group(visible=False)
431
 
432
  self.mcp_client.set_model_and_provider(provider_id, model_id)
433
 
 
437
  active_params = model_info.get("active_params", "N/A")
438
 
439
  if self.mcp_client.hf_client:
440
+ status = f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
441
  else:
442
+ status = "❌ Please add your Hugging Face API token"
443
+
444
+ # Toggle reasoning UI by model family
445
+ show_reasoning = AppConfig.is_gpt_oss_model(model_id)
446
+ return status, gr.Group(visible=show_reasoning)
447
 
448
  # Chat handlers
449
  def submit_message(message, history):
 
582
  # Connect provider/model dropdowns with auto-selection on load
583
  demo.load(
584
  fn=lambda: handle_provider_change("cerebras"),
585
+ outputs=[self.model_dropdown, self.api_status, self.reasoning_group]
586
  )
587
 
588
  # Initialise default mcp server load
 
594
  self.provider_dropdown.change(
595
  handle_provider_change,
596
  inputs=[self.provider_dropdown],
597
+ outputs=[self.model_dropdown, self.api_status, self.reasoning_group]
598
  )
599
 
600
  self.model_dropdown.change(
601
  handle_model_change,
602
  inputs=[self.provider_dropdown, self.model_dropdown],
603
+ outputs=[self.api_status, self.reasoning_group]
604
  )
605
 
606
  # Connect chat