Fix chat response issues: st.experimental_rerun(), remove nested chat messages, increase session timeout
app.py
CHANGED
@@ -257,6 +257,7 @@ if user_input and not st.session_state.is_processing:
     if not is_valid:
         st.error(validated_input)
         st.session_state.is_processing = False
+        st.experimental_rerun()  # Fixed: use experimental_rerun
     else:
         st.session_state.is_processing = True

@@ -272,187 +273,182 @@ if user_input and not st.session_state.is_processing:
     })

     # Process AI response
+    response_container = st.empty()
+    status_placeholder = st.empty()
+    response_placeholder = st.empty()
+
+    try:
+        # Get conversation history from session
+        user_session = session_manager.get_session("default_user")
+        conversation_history = user_session.get("conversation", []).copy()

+        # Add the current user message to history for context
+        conversation_history.append({"role": "user", "content": validated_input})
+
+        # Check if cosmic mode is enabled
+        if st.session_state.cosmic_mode:
+            # Process cosmic cascade response
+            status_placeholder.info("🐱 Cosmic Kitten Responding...")

+            try:
+                # Get conversation history
+                user_session = session_manager.get_session("default_user")
+                conversation_history = user_session.get("conversation", []).copy()
+                conversation_history.append({"role": "user", "content": validated_input})

+                # Stage 1: Local Ollama Response
+                ollama_provider = OllamaProvider(st.session_state.selected_model)
+                local_response = ollama_provider.generate(validated_input, conversation_history)
+
+                if local_response:
+                    # Display response (no nested st.chat_message)
+                    st.markdown(f"### 🐱 Cosmic Kitten Says:\n{local_response}")
+                    st.session_state.messages.append({
+                        "role": "assistant",
+                        "content": local_response,
+                        "source": "local_kitty",
+                        "timestamp": datetime.now().strftime("%H:%M:%S")
+                    })
+
+                    # Stage 2: HF Endpoint Analysis
+                    status_placeholder.info("🛰️ Beaming Query to Orbital Station...")
+                    if config.hf_token:
+                        # Check HF status first
+                        hf_status = hf_monitor.check_endpoint_status()
+                        if not hf_status['available']:
+                            status_placeholder.info(personality.get_initializing_message())

+                        hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
+                        hf_response = hf_provider.generate(validated_input, conversation_history)

+                        if hf_response:
+                            # Display response (no nested st.chat_message)
+                            st.markdown(f"### 🛰️ Orbital Station Reports:\n{hf_response}")
                             st.session_state.messages.append({
                                 "role": "assistant",
+                                "content": hf_response,
+                                "source": "orbital_station",
                                 "timestamp": datetime.now().strftime("%H:%M:%S")
                             })
+
+                            # Stage 3: Local Synthesis
+                            status_placeholder.info("🐱 Cosmic Kitten Synthesizing Wisdom...")
+
+                            # Update history with both responses
+                            synthesis_history = conversation_history.copy()
+                            synthesis_history.extend([
+                                {"role": "assistant", "content": local_response},
+                                {"role": "assistant", "content": hf_response, "source": "cloud"}
+                            ])
+
+                            synthesis = ollama_provider.generate(
+                                f"Synthesize these two perspectives:\n1. Local: {local_response}\n2. Cloud: {hf_response}",
+                                synthesis_history
+                            )
+
+                            if synthesis:
+                                # Display response (no nested st.chat_message)
+                                st.markdown(f"### 🌌 Final Cosmic Summary:\n{synthesis}")
                                 st.session_state.messages.append({
                                     "role": "assistant",
+                                    "content": synthesis,
+                                    "source": "cosmic_summary",
                                     "timestamp": datetime.now().strftime("%H:%M:%S")
                                 })

+                status_placeholder.success("✨ Cosmic Cascade Complete!")
+
+            except Exception as e:
+                error_msg = f"🌌 Cosmic disturbance: {str(e)}"
+                st.error(error_msg)
+                st.session_state.messages.append({
+                    "role": "assistant",
+                    "content": error_msg,
+                    "source": "error",
+                    "timestamp": datetime.now().strftime("%H:%M:%S")
+                })
+        else:
+            # Traditional processing
+            # Try Ollama first
+            status_placeholder.info("🦙 Contacting Ollama...")
+            ai_response = None
+
+            try:
+                # Use the OllamaProvider directly with proper configuration
+                ollama_provider = OllamaProvider(st.session_state.selected_model)
+                ai_response = ollama_provider.generate(validated_input, conversation_history)
+
+                if ai_response:
+                    st.markdown(ai_response)  # Use st.markdown instead of response_placeholder
+                    status_placeholder.success("✅ Response received!")
+                else:
+                    status_placeholder.warning("⚠️ Empty response from Ollama")

+            except Exception as ollama_error:
+                error_message = str(ollama_error)
+                status_placeholder.error(f"❌ Ollama error: {error_message[:100]}...")
+                logger.error(f"Ollama error: {error_message}")
+
+            # Fallback to HF if available
+            if config.hf_token and not ai_response:
+                status_placeholder.info("⚡ Initializing HF Endpoint (2–4 minutes)...")

+                try:
+                    # Check HF status first
+                    hf_status = hf_monitor.check_endpoint_status()
+                    if not hf_status['available']:
+                        status_placeholder.info(personality.get_initializing_message())

+                    # Use the HuggingFaceProvider directly
+                    hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
+                    ai_response = hf_provider.generate(validated_input, conversation_history)
+
+                    if ai_response:
+                        st.markdown(ai_response)  # Use st.markdown instead of response_placeholder
+                        status_placeholder.success("✅ HF response received!")
+                    else:
+                        status_placeholder.error("❌ No response from HF")

+                except Exception as hf_error:
+                    error_message = str(hf_error)
+                    status_placeholder.error(f"❌ HF also failed: {error_message[:100]}...")
+                    logger.error(f"HF error: {error_message}")
+
+            # Save response if successful
+            if ai_response:
+                # Update conversation history in session
+                conversation = user_session.get("conversation", []).copy()
+                conversation.append({"role": "user", "content": validated_input})
+                conversation.append({"role": "assistant", "content": ai_response})
+                session_manager.update_session("default_user", {"conversation": conversation})
+
+                # Add to message history
+                st.session_state.messages.append({
+                    "role": "assistant",
+                    "content": ai_response,
+                    "timestamp": datetime.now().strftime("%H:%M:%S")
+                })
+            else:
+                error_msg = "Sorry, I couldn't process your request. Please try again."
+                st.session_state.messages.append({
+                    "role": "assistant",
+                    "content": error_msg,
+                    "timestamp": datetime.now().strftime("%H:%M:%S")
+                })
+                st.markdown(error_msg)
+
-    # Force UI update
-    st.rerun()  # Changed from experimental_rerun() to rerun()
+    except Exception as e:
+        error_msg = f"System error: {str(e)}"
+        logger.error(f"Chat processing error: {error_msg}")
+        st.error(error_msg)
+        st.session_state.messages.append({
+            "role": "assistant",
+            "content": error_msg,
+            "timestamp": datetime.now().strftime("%H:%M:%S")
+        })
+    finally:
+        st.session_state.is_processing = False
+        st.experimental_rerun()  # Fixed: use experimental_rerun

 # Add evaluation dashboard tab (separate from chat interface) - ONLY ABOUT TAB NOW
 st.divider()

@@ -498,4 +494,4 @@ if user_input and user_input.lower().strip() in ["tell me a story", "tell me a c
         "timestamp": datetime.now().strftime("%H:%M:%S")
     })
     st.session_state.is_processing = False
-    st.rerun()
+    st.experimental_rerun()
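For reference, the pattern this commit converges on is a processing flag plus a forced rerun once the response is appended to session state. Below is a minimal, self-contained sketch of that loop, not the app's actual code: the echo reply stands in for the real Ollama/HF provider calls, and session persistence is omitted. Note that current Streamlit releases expose the rerun call as st.rerun(); st.experimental_rerun() is the older, deprecated name used in this commit.

# Minimal sketch of the processing-flag + rerun chat loop (assumed names;
# the echo reply is a placeholder for the real provider call).
from datetime import datetime

import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []
if "is_processing" not in st.session_state:
    st.session_state.is_processing = False

# Render history at the top level only, so chat messages are never nested.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

user_input = st.chat_input("Ask something")

if user_input and not st.session_state.is_processing:
    st.session_state.is_processing = True
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
        "timestamp": datetime.now().strftime("%H:%M:%S"),
    })
    try:
        reply = f"Echo: {user_input}"  # placeholder for the real provider call
        st.session_state.messages.append({
            "role": "assistant",
            "content": reply,
            "timestamp": datetime.now().strftime("%H:%M:%S"),
        })
    finally:
        # Clear the flag, then rerun so the new messages render from the history loop above.
        st.session_state.is_processing = False
        st.rerun()  # st.experimental_rerun() on older Streamlit versions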