rdune71 committed
Commit b40bdef · 1 Parent(s): 737aa03

Fix import errors in app.py by using provider classes directly instead of legacy functions
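In short, the module-level helpers from core.llm are replaced by provider objects used directly. A minimal sketch of the two call patterns, using only names that appear in this diff (the constructor arguments and the generate()/validate_model() signatures are inferred from their usage below, not from the providers' source):

    # Before: legacy helper functions imported from core.llm
    response = send_to_ollama(prompt, history, ngrok_url, model_name)
    response = send_to_hf(prompt, history)

    # After: provider classes used directly
    ollama = OllamaProvider(model_name)
    if ollama.validate_model():          # also reused for the sidebar connection test
        response = ollama.generate(prompt, history)

    hf = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
    response = hf.generate(prompt, history)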

Files changed (1):
  1. app.py  (+39 / -47)
app.py CHANGED
@@ -8,7 +8,6 @@ from datetime import datetime
  from pathlib import Path
  sys.path.append(str(Path(__file__).parent))
  from utils.config import config
- from core.llm import send_to_ollama, send_to_hf
  from core.session import session_manager
  from core.memory import check_redis_health
  from core.coordinator import coordinator
@@ -16,6 +15,9 @@ from core.errors import translate_error
  from core.personality import personality
  from services.hf_endpoint_monitor import hf_monitor
  from services.weather import weather_service
+ from core.llm import LLMClient
+ from core.providers.ollama import OllamaProvider
+ from core.providers.huggingface import HuggingFaceProvider
  import logging

  # Set up logging
@@ -78,21 +80,14 @@ with st.sidebar:

  if st.button("📡 Test Connection"):
  try:
- import requests
- headers = {
- "ngrok-skip-browser-warning": "true",
- "User-Agent": "CosmicCat-Test"
- }
- with st.spinner("Testing connection..."):
- response = requests.get(
- f"{ngrok_url_input}/api/tags",
- headers=headers,
- timeout=15
- )
- if response.status_code == 200:
- st.success("✅ Connection successful!")
- else:
- st.error(f"❌ Failed: {response.status_code}")
+ # Use OllamaProvider to test connection
+ ollama_provider = OllamaProvider(st.session_state.selected_model)
+ # Test model validation
+ is_valid = ollama_provider.validate_model()
+ if is_valid:
+ st.success("✅ Connection successful!")
+ else:
+ st.error("❌ Model validation failed")
  except Exception as e:
  st.error(f"❌ Error: {str(e)[:50]}...")

@@ -282,10 +277,11 @@ if user_input and not st.session_state.is_processing:
  response_placeholder = st.empty()

  try:
- # Get conversation history
+ # Get conversation history from session
  user_session = session_manager.get_session("default_user")
- conversation = user_session.get("conversation", [])
- conversation_history = conversation[-5:] # Last 5 messages
+ conversation_history = user_session.get("conversation", []).copy()
+
+ # Add the current user message to history for context
  conversation_history.append({"role": "user", "content": validated_input})

  # Check if cosmic mode is enabled
@@ -297,17 +293,14 @@ if user_input and not st.session_state.is_processing:
  # Get conversation history
  user_session = session_manager.get_session("default_user")
  conversation_history = user_session.get("conversation", []).copy()
-
+ conversation_history.append({"role": "user", "content": validated_input})
+
  # Stage 1: Local Ollama Response
- local_response = send_to_ollama(
- validated_input,
- conversation_history,
- st.session_state.ngrok_url_temp,
- st.session_state.selected_model
- )
-
+ ollama_provider = OllamaProvider(st.session_state.selected_model)
+ local_response = ollama_provider.generate(validated_input, conversation_history)
+
  if local_response:
- # Display response (no nested chat_message)
+ # Display response
  st.markdown(f"### 🐱 Cosmic Kitten Says:\n{local_response}")
  st.session_state.messages.append({
  "role": "assistant",
@@ -315,7 +308,7 @@ if user_input and not st.session_state.is_processing:
  "source": "local_kitty",
  "timestamp": datetime.now().strftime("%H:%M:%S")
  })
-
+
  # Stage 2: HF Endpoint Analysis
  status_placeholder.info("🛰️ Beaming Query to Orbital Station...")
  if config.hf_token:
@@ -323,11 +316,12 @@ if user_input and not st.session_state.is_processing:
  hf_status = hf_monitor.check_endpoint_status()
  if not hf_status['available']:
  status_placeholder.info(personality.get_initializing_message())
-
- hf_response = send_to_hf(validated_input, conversation_history)
+
+ hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
+ hf_response = hf_provider.generate(validated_input, conversation_history)

  if hf_response:
- # Display response (no nested chat_message)
+ # Display response
  st.markdown(f"### 🛰️ Orbital Station Reports:\n{hf_response}")
  st.session_state.messages.append({
  "role": "assistant",
@@ -335,7 +329,7 @@ if user_input and not st.session_state.is_processing:
  "source": "orbital_station",
  "timestamp": datetime.now().strftime("%H:%M:%S")
  })
-
+
  # Stage 3: Local Synthesis
  status_placeholder.info("🐱 Cosmic Kitten Synthesizing Wisdom...")

@@ -346,15 +340,13 @@ if user_input and not st.session_state.is_processing:
  {"role": "assistant", "content": hf_response, "source": "cloud"}
  ])

- synthesis = send_to_ollama(
+ synthesis = ollama_provider.generate(
  f"Synthesize these two perspectives:\n1. Local: {local_response}\n2. Cloud: {hf_response}",
- synthesis_history,
- st.session_state.ngrok_url_temp,
- st.session_state.selected_model
+ synthesis_history
  )

  if synthesis:
- # Display response (no nested chat_message)
+ # Display response
  st.markdown(f"### 🌟 Final Cosmic Summary:\n{synthesis}")
  st.session_state.messages.append({
  "role": "assistant",
@@ -362,7 +354,7 @@ if user_input and not st.session_state.is_processing:
  "source": "cosmic_summary",
  "timestamp": datetime.now().strftime("%H:%M:%S")
  })
-
+
  status_placeholder.success("✨ Cosmic Cascade Complete!")

  except Exception as e:
@@ -376,17 +368,14 @@ if user_input and not st.session_state.is_processing:
  })
  else:
  # Traditional processing
- # Try Ollama with proper error handling
+ # Try Ollama first
  status_placeholder.info("🦙 Contacting Ollama...")
  ai_response = None

  try:
- ai_response = send_to_ollama(
- validated_input,
- conversation_history,
- st.session_state.ngrok_url_temp,
- st.session_state.selected_model
- )
+ # Use the OllamaProvider directly
+ ollama_provider = OllamaProvider(st.session_state.selected_model)
+ ai_response = ollama_provider.generate(validated_input, conversation_history)

  if ai_response:
  st.markdown(ai_response) # Use st.markdown instead of response_placeholder
@@ -408,7 +397,9 @@ if user_input and not st.session_state.is_processing:
  if not hf_status['available']:
  status_placeholder.info(personality.get_initializing_message())

- ai_response = send_to_hf(validated_input, conversation_history)
+ # Use the HuggingFaceProvider directly
+ hf_provider = HuggingFaceProvider("meta-llama/Llama-2-7b-chat-hf")
+ ai_response = hf_provider.generate(validated_input, conversation_history)

  if ai_response:
  st.markdown(ai_response) # Use st.markdown instead of response_placeholder
@@ -423,6 +414,7 @@ if user_input and not st.session_state.is_processing:
  # Save response if successful
  if ai_response:
  # Update conversation history
+ conversation = user_session.get("conversation", [])
  conversation.append({"role": "user", "content": validated_input})
  conversation.append({"role": "assistant", "content": ai_response})
  user_session["conversation"] = conversation