rdune71 committed
Commit ef600c3 · 1 Parent(s): f09ddb7

Update system to use remote Ollama host with graceful fallback handling and sync with remote changes
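Note: the new app.py imports get_model from src/models/model_factory.py, a module not included in this diff. As a rough sketch of the "graceful fallback handling" described above, a factory along the following lines would satisfy the two call sites the new code depends on, get_model() and generate_response(prompt, context). The OLLAMA_HOST and OLLAMA_MODEL environment variables and both class names are illustrative assumptions, not code from this repository.

# Hypothetical sketch of src/models/model_factory.py (not part of this commit).
# It targets Ollama's OpenAI-compatible /v1 endpoint so the pinned `openai`
# package is the only dependency; all names below are assumptions.
import os

from openai import OpenAI


class RemoteOllamaModel:
    """Talks to a remote Ollama server through its OpenAI-compatible /v1 API."""

    def __init__(self, host, model_name):
        self.client = OpenAI(base_url=f"{host.rstrip('/')}/v1", api_key="ollama")
        self.model_name = model_name

    def generate_response(self, prompt, context=""):
        completion = self.client.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": f"Today's journal entry:\n{context}"},
                {"role": "user", "content": prompt},
            ],
        )
        return completion.choices[0].message.content


class FallbackModel:
    """Degraded mode used when no remote host is configured or reachable."""

    def generate_response(self, prompt, context=""):
        return "The coaching model is currently unavailable. Please try again later."


def get_model():
    host = os.environ.get("OLLAMA_HOST")  # e.g. http://my-ollama-box:11434
    model_name = os.environ.get("OLLAMA_MODEL", "llama3")
    if host:
        try:
            remote = RemoteOllamaModel(host, model_name)
            remote.client.models.list()  # cheap connectivity probe
            return remote
        except Exception:
            pass  # degrade gracefully to the offline fallback
    return FallbackModel()

Routing through the OpenAI-compatible endpoint would also explain why this commit keeps openai in requirements.txt while dropping requests.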

Files changed (2)
  1. app.py +60 -74
  2. requirements.txt +3 -9
app.py CHANGED
@@ -1,77 +1,63 @@
-# Force redeploy trigger - version 1.1
-import streamlit as st
-from utils.config import config
-import requests
-import json
-from core.memory import load_user_state
-
-# Set page config
-st.set_page_config(page_title="AI Life Coach", page_icon="🧘", layout="centered")
-
-# Sidebar for user selection
-st.sidebar.title("🧘 AI Life Coach")
-user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
-st.sidebar.markdown("---")
-
-# Fetch Ollama status
-def get_ollama_status():
     try:
-        response = requests.get("http://localhost:8000/api/ollama-status")
-        if response.status_code == 200:
-            return response.json()
-    except Exception:
-        return {"running": False, "model_loaded": None}
-
-# After user selects name, load conversation history
-def get_conversation_history(user_id):
-    user_state = load_user_state(user_id)
-    if user_state and "conversation" in user_state:
-        return json.loads(user_state["conversation"])
-    return []
-
-ollama_status = get_ollama_status()
-
-# Display Ollama status
-if ollama_status["running"]:
-    st.sidebar.success(f"🧠 Model Running: {ollama_status['model_loaded']}")
-else:
-    st.sidebar.error("🧠 Ollama is not running or no model loaded.")
-
-# Main chat interface
-st.title("🧘 AI Life Coach")
-st.markdown("Talk to your personal development assistant.")
-
-if not ollama_status["running"]:
-    st.warning("⚠️ Ollama is not running. Please start Ollama to use the AI Life Coach.")
-else:
-    # Display conversation history
-    conversation = get_conversation_history(user)
-    for msg in conversation:
-        role = msg["role"].capitalize()
-        content = msg["content"]
-        st.markdown(f"**{role}:** {content}")

-    # Chat input
-    user_input = st.text_input("Your message...", key="input")
-    if st.button("Send"):
-        if user_input.strip() == "":
-            st.warning("Please enter a message.")
-        else:
-            # Display user message
-            st.markdown(f"**You:** {user_input}")

-            # Send to backend
-            with st.spinner("AI Coach is thinking..."):
-                try:
-                    response = requests.post(
-                        "http://localhost:8000/api/chat",
-                        json={"user_id": user, "message": user_input}
-                    )
-                    if response.status_code == 200:
-                        response_data = response.json()
-                        ai_response = response_data.get("response", "")
-                        st.markdown(f"**AI Coach:** {ai_response}")
-                    else:
-                        st.error("Failed to get response from AI Coach.")
-                except Exception as e:
-                    st.error(f"Connection error: {e}")
+from flask import Flask, render_template, request, jsonify
+import os
+import sys
+from datetime import datetime
+
+# Add src to Python path to import models
+sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
+
+app = Flask(__name__)
+
+# Try to import dotenv; if not available, create a simple fallback
+try:
+    from dotenv import load_dotenv
+    load_dotenv()
+except ImportError:
+    def load_dotenv():
+        pass
+
+# Path to your Obsidian vault (synced via OneDrive)
+OBSIDIAN_PATH = os.path.expanduser("~/OneDrive/ObsidianVault")
+
+def get_todays_journal():
+    """Retrieve today's journal entry from the Obsidian vault."""
+    today = datetime.now().strftime("%Y-%m-%d")
+    journal_path = os.path.join(OBSIDIAN_PATH, "Journal", f"{today}.md")
+
+    if os.path.exists(journal_path):
+        with open(journal_path, "r", encoding="utf-8") as f:
+            return f.read()
+    else:
+        return f"No journal entry found for {today}. Create one in your Obsidian vault!"
+
+# Import and initialize the AI model
+from src.models.model_factory import get_model
+ai_model = get_model()
+
+def ai_coach(prompt):
+    """Main AI coaching function using the model factory."""
+    context = get_todays_journal()
+
     try:
+        return ai_model.generate_response(prompt, context)
+    except Exception as e:
+        return f"Error generating response: {str(e)}"
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+@app.route('/coach', methods=['POST'])
+def coach():
+    data = request.get_json()
+    prompt = data.get('prompt', '')
+
+    if not prompt:
+        return jsonify({'error': 'No prompt provided'}), 400

+    response = ai_coach(prompt)
+    return jsonify({'response': response})

+# Required for Hugging Face Spaces
+if __name__ == '__main__':
+    app.run(debug=False, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
requirements.txt CHANGED
@@ -1,9 +1,3 @@
-streamlit==1.24.0
-fastapi==0.95.0
-uvicorn==0.21.1
-redis==5.0.3
-python-dotenv==1.0.0
-openai==1.35.6
-tavily-python>=0.1.0,<1.0.0
-requests==2.31.0
-docker==6.1.3
+flask==3.1.2
+python-dotenv==1.1.1
+openai==1.106.1
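
For reference, the new /coach route can be exercised with a short client once the Space is running. A minimal sketch: the URL is a placeholder, and requests is assumed to be available in the client's own environment (the app itself no longer depends on it after this commit).

# Minimal client sketch for the new /coach endpoint; the URL is a placeholder.
import requests

resp = requests.post(
    "http://localhost:7860/coach",  # substitute your Space's URL
    json={"prompt": "Help me plan a productive morning."},
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["response"])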