rdune71 committed on
Commit
5720799
·
1 Parent(s): 1ca7ef3

Fix app.py to use Streamlit implementation and update requirements

Browse files
Files changed (2) hide show
  1. app.py +87 -62
  2. requirements.txt +9 -3
app.py CHANGED
@@ -1,63 +1,88 @@
1
- from flask import Flask, render_template, request, jsonify
2
- import os
3
- import sys
4
- from datetime import datetime
5
-
6
- # Add src to Python path to import models
7
- sys.path.append(os.path.join(os.path.dirname(__file__), 'src'))
8
-
9
# Flask application instance (served on Hugging Face Spaces).
app = Flask(__name__)

# Load environment variables from a .env file when python-dotenv is
# installed; otherwise install a no-op fallback so the module still imports.
# NOTE(review): the fallback load_dotenv is defined but never called, so a
# missing dotenv package simply means no .env loading happens.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    def load_dotenv():
        pass

# Path to your Obsidian vault (synced via OneDrive) that journal entries
# are read from.
OBSIDIAN_PATH = os.path.expanduser("~/OneDrive/ObsidianVault")
21
-
22
def get_todays_journal():
    """Return today's journal entry from the Obsidian vault.

    Looks for ``Journal/<YYYY-MM-DD>.md`` under ``OBSIDIAN_PATH`` and
    returns its text; when no entry exists, returns a friendly
    placeholder message instead of raising.
    """
    # Fix: the original docstring opened with four quotes (""""...),
    # leaving a stray leading quote character in the docstring text.
    today = datetime.now().strftime("%Y-%m-%d")
    journal_path = os.path.join(OBSIDIAN_PATH, "Journal", f"{today}.md")

    if os.path.exists(journal_path):
        with open(journal_path, "r", encoding="utf-8") as f:
            return f.read()
    # No entry for today - report it to the user rather than failing.
    return f"No journal entry found for {today}. Create one in your Obsidian vault!"
32
-
33
# Import and initialize the AI model once at import time; the concrete
# backend is chosen inside src/models/model_factory.py.
from src.models.model_factory import get_model
ai_model = get_model()
36
-
37
def ai_coach(prompt):
    """Generate a coaching reply for *prompt* via the model factory.

    Today's journal entry is passed to the model as context. Any model
    failure is converted into a plain-text error string so the HTTP
    handler never sees an exception.
    """
    # Fix: the original docstring opened with four quotes (""""...),
    # leaving a stray leading quote character in the docstring text.
    context = get_todays_journal()

    try:
        return ai_model.generate_response(prompt, context)
    except Exception as e:
        return f"Error generating response: {str(e)}"
45
-
46
@app.route('/')
def index():
    # Serve the single-page chat UI.
    return render_template('index.html')
49
-
50
@app.route('/coach', methods=['POST'])
def coach():
    """Chat endpoint: expects JSON {"prompt": "<user message>"}."""
    data = request.get_json()
    prompt = data.get('prompt', '')

    # Reject empty prompts with a 400 instead of invoking the model.
    if not prompt:
        return jsonify({'error': 'No prompt provided'}), 400

    response = ai_coach(prompt)
    return jsonify({'response': response})
60
-
61
# Required for Hugging Face Spaces
if __name__ == '__main__':
    # Spaces injects PORT; 7860 is the Spaces default port convention.
    app.run(debug=False, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Force redeploy trigger - version 1.1
import streamlit as st
from utils.config import config
import requests
import json
from core.memory import load_user_state

# Configure the page; must run before any other st.* call.
st.set_page_config(page_title="AI Life Coach", page_icon="🧘", layout="centered")

# Sidebar: choose whose conversation state to load.
st.sidebar.title("🧘 AI Life Coach")
user = st.sidebar.selectbox("Select User", ["Rob", "Sarah"])
st.sidebar.markdown("---")
15
+
16
+ # Fetch Ollama status
17
+ def get_ollama_status():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  try:
19
+ # Add headers to skip ngrok browser warning
20
+ headers = {
21
+ "ngrok-skip-browser-warning": "true",
22
+ "User-Agent": "AI-Life-Coach-App"
23
+ }
24
+ response = requests.get("http://localhost:8000/api/ollama-status", headers=headers)
25
+ if response.status_code == 200:
26
+ return response.json()
27
+ except Exception:
28
+ return {"running": False, "model_loaded": None}
29
+
30
# Load the selected user's stored conversation once a name is chosen.
def get_conversation_history(user_id):
    """Return the saved conversation list for *user_id*, or [] if none."""
    state = load_user_state(user_id)
    # Guard clauses: no state, or no stored conversation -> empty history.
    if not state or "conversation" not in state:
        return []
    return json.loads(state["conversation"])
36
+
37
# NOTE(review): if get_ollama_status() hits a non-200 response it falls
# through its try without returning, yielding None — the subscripts below
# would then raise TypeError. Confirm the backend contract or harden it.
ollama_status = get_ollama_status()

# Show backend status in the sidebar.
if ollama_status["running"]:
    st.sidebar.success(f"🧠 Model Running: {ollama_status['model_loaded']}")
else:
    st.sidebar.error("🧠 Ollama is not running or no model loaded.")

# Main chat interface
st.title("🧘 AI Life Coach")
st.markdown("Talk to your personal development assistant.")

if not ollama_status["running"]:
    st.warning("⚠️ Ollama is not running. Please start Ollama to use the AI Life Coach.")
else:
    # Replay the stored conversation for the selected user.
    conversation = get_conversation_history(user)
    for msg in conversation:
        role = msg["role"].capitalize()
        content = msg["content"]
        st.markdown(f"**{role}:** {content}")

    # Chat input
    user_input = st.text_input("Your message...", key="input")
    if st.button("Send"):
        if user_input.strip() == "":
            st.warning("Please enter a message.")
        else:
            # Echo the user's message immediately.
            st.markdown(f"**You:** {user_input}")

            # Forward the message to the backend chat endpoint.
            with st.spinner("AI Coach is thinking..."):
                try:
                    # Headers skip the ngrok browser-warning interstitial.
                    headers = {
                        "ngrok-skip-browser-warning": "true",
                        "User-Agent": "AI-Life-Coach-App"
                    }
                    response = requests.post(
                        "http://localhost:8000/api/chat",
                        json={"user_id": user, "message": user_input},
                        headers=headers
                    )
                    if response.status_code == 200:
                        response_data = response.json()
                        ai_response = response_data.get("response", "")
                        st.markdown(f"**AI Coach:** {ai_response}")
                    else:
                        st.error("Failed to get response from AI Coach.")
                except Exception as e:
                    st.error(f"Connection error: {e}")
requirements.txt CHANGED
@@ -1,3 +1,9 @@
1
- flask==3.1.2
2
- python-dotenv==1.1.1
3
- openai==1.106.1
 
 
 
 
 
 
 
1
+ streamlit==1.24.0
2
+ fastapi==0.95.0
3
+ uvicorn==0.21.1
4
+ redis==5.0.3
5
+ python-dotenv==1.0.0
6
+ openai==1.35.6
7
+ tavily-python>=0.1.0,<1.0.0
8
+ requests==2.31.0
9
+ docker==6.1.3