Add error handling and try to improve the display in mobile UIs
app.py CHANGED
@@ -64,12 +64,20 @@ def chat_fn(message, history):
     check_format(history, "messages")
 
     # Create the streaming response
-
-
-
-
-
-
+    try:
+        stream = client.chat.completions.create(
+            model=model_config.get('MODEL_NAME'),
+            messages=history,
+            temperature=0.8,
+            stream=True
+        )
+    except Exception as e:
+        print(f"Error: {e}")
+        yield gr.ChatMessage(
+            role="assistant",
+            content="😔 The model is unavailable at the moment. Please try again later.",
+        )
+        return
 
     if is_reasoning:
         history.append(gr.ChatMessage(
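The new try/except guards only the call that opens the stream; consuming it (including the `is_reasoning` branch shown as context above) happens later in `chat_fn`. A minimal, self-contained sketch of how the guarded call and the fallback message fit together, assuming an OpenAI-compatible `client`, a `model_config` dict like the one referenced in the diff, and a plain streaming loop in place of the Space's reasoning handling:

```python
import gradio as gr
from openai import OpenAI

client = OpenAI()                                   # assumed OpenAI-compatible client
model_config = {"MODEL_NAME": "example-model"}      # hypothetical config for illustration


def chat_fn(message, history):
    history = history + [{"role": "user", "content": message}]

    # Guard only the request that opens the stream; on failure, show a
    # friendly assistant message instead of letting the exception propagate.
    try:
        stream = client.chat.completions.create(
            model=model_config.get("MODEL_NAME"),
            messages=history,
            temperature=0.8,
            stream=True,
        )
    except Exception as e:
        print(f"Error: {e}")
        yield gr.ChatMessage(
            role="assistant",
            content="😔 The model is unavailable at the moment. Please try again later.",
        )
        return

    # Stream partial responses back to the UI as chunks arrive.
    partial = ""
    for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            partial += chunk.choices[0].delta.content
            yield gr.ChatMessage(role="assistant", content=partial)
```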
@@ -149,6 +157,10 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
         padding: 0;
     }
 
+    .chatbot {
+        max-height: 1400px;
+    }
+
     @media (max-width: 800px) {
         .responsive-row {
             flex-direction: column;
@@ -160,6 +172,9 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
             flex-direction: column;
             align-items: flex-start;
         }
+        .chatbot {
+            max-height: 900px;
+        }
     }
     """)
 
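Taken together, the two CSS additions cap the chatbot at 1400px tall on desktop and 900px below the 800px breakpoint, via the `chatbot` class that the `gr.Chatbot` gains further down. The diff does not show how this stylesheet is attached to the app; purely as an illustration (not necessarily how this Space wires it), the same rules could be passed through `gr.Blocks(css=...)`:

```python
import gradio as gr

# Hypothetical wiring: the rules added in the diff, attached via gr.Blocks(css=...).
css = """
.chatbot {
    max-height: 1400px;
}

@media (max-width: 800px) {
    .responsive-row {
        flex-direction: column;
    }
    .chatbot {
        max-height: 900px;
    }
}
"""

with gr.Blocks(theme=gr.themes.Default(primary_hue="green"), css=css) as demo:
    # Components created with elem_classes="chatbot" / "responsive-row"
    # pick up these rules automatically.
    chatbot = gr.Chatbot(type="messages", elem_classes="chatbot")

demo.launch()
```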
@@ -177,16 +192,18 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
         with gr.Column(scale=4, min_width=0):
             description_html = gr.HTML(description, elem_classes="model-message")
 
-
+            chatbot = gr.Chatbot(
                 type="messages",
                 height="calc(100dvh - 280px)",
+                elem_classes="chatbot",
             )
 
             chat_interface = gr.ChatInterface(
                 chat_fn,
                 description="",
                 type="messages",
-                chatbot=
+                chatbot=chatbot,
+                fill_height=True,
             )
 
             # Add this line to ensure the model is reset to default on page reload
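Building the `gr.Chatbot` first and handing it to `gr.ChatInterface` through `chatbot=` keeps the custom height, the `chatbot` CSS hook, and a component handle that later code (the reset below) can reference; `fill_height=True` lets the interface stretch to fill the viewport. A condensed sketch of that wiring, with the surrounding layout filled in as an assumption:

```python
import gradio as gr

with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
    with gr.Row(elem_classes="responsive-row"):        # assumed row wrapper
        with gr.Column(scale=4, min_width=0):
            # Pre-built chatbot: custom height plus the CSS hook class.
            chatbot = gr.Chatbot(
                type="messages",
                height="calc(100dvh - 280px)",
                elem_classes="chatbot",
            )

            # ChatInterface reuses that component instead of creating its own.
            chat_interface = gr.ChatInterface(
                chat_fn,                               # streaming handler defined earlier
                description="",
                type="messages",
                chatbot=chatbot,
                fill_height=True,
            )
```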
@@ -197,7 +214,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="green")) as demo:
         # Remove the "Model: " prefix to get the actual model name
         actual_model_name = model_name.replace("Model: ", "")
         desc = setup_model(actual_model_name)
-
+        chatbot.clear()  # Critical line
         return desc
 
 
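The `chatbot.clear()` line is the change the author marks as critical: switching models should not carry the previous conversation over. For reference, another common way to express the same reset in Gradio is to have the model-change handler also return an empty message list to the chatbot; the selector component and the `setup_model` stub below are assumptions for illustration, not the Space's actual code:

```python
import gradio as gr


def setup_model(name):                                 # stand-in for the Space's real helper
    return f"<p>Now serving <b>{name}</b></p>"


with gr.Blocks() as demo:
    description_html = gr.HTML(setup_model("A"))
    chatbot = gr.Chatbot(type="messages")
    model_selector = gr.Radio(["Model: A", "Model: B"], value="Model: A")

    def on_model_change(model_name):
        # Strip the "Model: " prefix, reconfigure the backend, and wipe the chat.
        actual_model_name = model_name.replace("Model: ", "")
        desc = setup_model(actual_model_name)
        return desc, []                                # [] empties the messages-format chatbot

    model_selector.change(
        on_model_change,
        inputs=model_selector,
        outputs=[description_html, chatbot],
    )

demo.launch()
```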