Update ui_components.py

ui_components.py · CHANGED · +11 −25

This commit strips the logprobs and streaming controls from the generation-parameter panel, relabels the seed box, repairs the truncated `"seed":` entry in the params dict (a syntax error as the old lines stand), and removes dangling references to the deleted components from the change-event wiring.
```diff
@@ -139,17 +139,12 @@ class UIComponents:
         with gr.Row():
             self.temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.3, step=0.01, label="Temperature")
             self.top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, value=1.0, step=0.01, label="Top-p")
-            self.top_logprobs = gr.Slider(minimum=0, maximum=5, value=0, step=1, label="Top logprobs")
         with gr.Row():
             self.max_tokens_box = gr.Number(value=8192, precision=0, label="Max tokens")
-            self.seed_box = gr.Number(value=None, precision=0, label="Seed")
+            self.seed_box = gr.Number(value=None, precision=0, label="Seed (-1 = random)")
         with gr.Row():
             self.frequency_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Frequency penalty")
             self.presence_penalty = gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.01, label="Presence penalty")
-        with gr.Row():
-            self.logprobs_chk = gr.Checkbox(value=False, label="Return logprobs")
-            self.stream_chk = gr.Checkbox(value=False, label="Stream responses (not yet supported in UI)")
-            self.stream_include_usage = gr.Checkbox(value=False, label="Stream include usage")
         self.stop_sequences = gr.Textbox(label="Stop sequences (comma-separated)", placeholder="e.g. \n\n, User:")
 
         # Reasoning effort (GPT-OSS)
```
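The relabeled seed box leans on a `gr.Number` behavior worth calling out: with `precision=0` the entry is rounded to an integer, and clearing the field delivers `None` to callbacks (recent Gradio versions), which is why the handler in the next hunk treats `None` and the `-1` sentinel alike. A minimal standalone probe of that behavior, assuming only that Gradio is installed; the component names here are illustrative, not from the commit:

```python
import gradio as gr

with gr.Blocks() as demo:
    # precision=0 rounds the entry to an integer; clearing the field
    # sends None to the callback, hence the None check downstream.
    seed = gr.Number(value=None, precision=0, label="Seed (-1 = random)")
    echo = gr.Textbox(label="Value received by the callback")
    seed.change(lambda s: repr(s), inputs=[seed], outputs=[echo])

if __name__ == "__main__":
    demo.launch()
```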
```diff
@@ -176,31 +171,24 @@ class UIComponents:
         self.response_format.change(_on_response_format_change, inputs=[self.response_format], outputs=[self.json_schema_group])
 
         def update_generation_params(
-            temperature, top_p,
-            frequency_penalty, presence_penalty,
+            temperature, top_p, max_tokens, seed,
+            frequency_penalty, presence_penalty,
             stop_sequences, reasoning_effort, response_format,
             json_schema_name, json_schema_description, json_schema_editor, json_schema_strict,
-            tool_choice, tool_function_name, tool_prompt, tools_json
+            tool_choice, tool_function_name, tool_prompt, tools_json
         ):
             params = {
                 "temperature": float(temperature) if temperature is not None else None,
                 "top_p": float(top_p) if top_p is not None else None,
-                "top_logprobs": int(top_logprobs) if top_logprobs else None,
                 "max_tokens": int(max_tokens) if max_tokens else None,
-                "seed":
+                "seed": (None if seed in (None, "", -1) else int(seed)),
                 "frequency_penalty": float(frequency_penalty) if frequency_penalty is not None else None,
                 "presence_penalty": float(presence_penalty) if presence_penalty is not None else None,
-                "logprobs": bool(logprobs),
-                # Prevent enabling streaming until UI supports it end-to-end
-                "stream": False,
                 # stop: list[str]
                 "stop": [s.strip() for s in stop_sequences.split(",") if s.strip()] if stop_sequences else None,
                 # GPT-OSS specific control stored separately; mcp_client merges it
                 "reasoning_effort": reasoning_effort,
             }
-            # stream options
-            if stream_include_usage:
-                params["stream_options"] = {"include_usage": True}
 
             # response_format
             if response_format == "json_object":
```
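Two of the conversions above are easy to get wrong, so here is the same logic pulled out as plain, testable functions; the helper names are illustrative and do not appear in the commit:

```python
def normalize_seed(seed):
    # None (a cleared number box), "" and the -1 sentinel all mean
    # "no fixed seed"; anything else is coerced to int for the payload.
    return None if seed in (None, "", -1) else int(seed)

def parse_stop(stop_sequences):
    # Comma-separated textbox value -> trimmed list with empty entries
    # dropped; an empty box maps to None so the key can be omitted upstream.
    return [s.strip() for s in stop_sequences.split(",") if s.strip()] if stop_sequences else None

assert normalize_seed(None) is None   # cleared gr.Number
assert normalize_seed(-1) is None     # explicit "random" sentinel
assert normalize_seed(7.0) == 7       # UI floats become ints
assert parse_stop("User:, END") == ["User:", "END"]
assert parse_stop("") is None
```

Note that this hunk also drops the `"stream": False` placeholder and the `stream_options` block, consistent with removing the streaming checkboxes in the first hunk.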
```diff
@@ -253,24 +241,22 @@ class UIComponents:
 
         # Wire updates on change
         for comp in [
-            self.temperature_slider, self.top_p_slider,
+            self.temperature_slider, self.top_p_slider,
             self.max_tokens_box, self.seed_box, self.frequency_penalty,
-            self.presence_penalty,
+            self.presence_penalty,
             self.stop_sequences, self.reasoning_effort, self.response_format,
             self.json_schema_name, self.json_schema_description, self.json_schema_editor, self.json_schema_strict,
-            self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
-            self.stream_include_usage
+            self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
         ]:
             comp.change(
                 update_generation_params,
                 inputs=[
-                    self.temperature_slider, self.top_p_slider,
+                    self.temperature_slider, self.top_p_slider,
                     self.max_tokens_box, self.seed_box, self.frequency_penalty,
-                    self.presence_penalty,
+                    self.presence_penalty,
                     self.stop_sequences, self.reasoning_effort, self.response_format,
                     self.json_schema_name, self.json_schema_description, self.json_schema_editor, self.json_schema_strict,
-                    self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
-                    self.stream_include_usage
+                    self.tool_choice, self.tool_function_name, self.tool_prompt, self.tools_json
                 ],
                 outputs=[self.gen_param_status]
             )
```
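The old wiring lists appear to place `self.stream_include_usage` directly after `self.tools_json` with no separating comma (as rendered), which would be a syntax error inside a Python list; the commit resolves it by deleting the reference along with the checkbox itself. The fan-in pattern that remains is conventional Gradio: every control's `change` event re-runs one callback over the full input set and reports the rebuilt params. A self-contained sketch of that pattern, reduced to two controls with illustrative names:

```python
import json
import gradio as gr

with gr.Blocks() as demo:
    temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.3, step=0.01, label="Temperature")
    seed = gr.Number(value=None, precision=0, label="Seed (-1 = random)")
    status = gr.Textbox(label="Current params", interactive=False)

    def update_params(temperature, seed):
        # Rebuild the payload from scratch on every change, as
        # update_generation_params does for the full component set.
        params = {
            "temperature": float(temperature) if temperature is not None else None,
            "seed": None if seed in (None, "", -1) else int(seed),
        }
        return json.dumps(params)

    # One callback, fanned in from every control's change event.
    for comp in [temperature, seed]:
        comp.change(update_params, inputs=[temperature, seed], outputs=[status])

if __name__ == "__main__":
    demo.launch()
```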