rdune71 commited on
Commit
c0ef6d4
·
1 Parent(s): 84ae379

Add session management, weather service, and TTS modules

Browse files
Files changed (5) hide show
  1. README.md +28 -16
  2. core/session.py +99 -0
  3. requirements.txt +1 -0
  4. services/tts.py +88 -0
  5. services/weather.py +114 -0
README.md CHANGED
@@ -20,6 +20,9 @@ Your personal AI-powered life coaching assistant.
20
  - Multiple LLM provider support (Ollama, Hugging Face, OpenAI)
21
  - Dynamic model selection
22
  - Remote Ollama integration via ngrok
 
 
 
23
 
24
  ## How to Use
25
 
@@ -30,33 +33,42 @@ Your personal AI-powered life coaching assistant.
30
 
31
  ## Requirements
32
 
33
- All requirements are specified in `requirements.txt`. The app automatically handles:
34
  - Streamlit UI
35
  - FastAPI backend (for future expansion)
36
  - Redis connection for persistent memory
37
  - Multiple LLM integrations
 
 
38
 
39
  ## Environment Variables
40
 
41
- Configure these in your Hugging Face Space secrets or local `.env` file:
42
 
43
- - `OLLAMA_HOST`: Your Ollama server URL (default: ngrok URL)
44
- - `LOCAL_MODEL_NAME`: Default model name (default: mistral)
45
- - `HF_TOKEN`: Hugging Face API token (for Hugging Face models)
46
- - `HF_API_ENDPOINT_URL`: Hugging Face inference API endpoint
47
- - `USE_FALLBACK`: Whether to use fallback providers (true/false)
48
- - `REDIS_HOST`: Redis server hostname (default: localhost)
49
- - `REDIS_PORT`: Redis server port (default: 6379)
50
- - `REDIS_USERNAME`: Redis username (optional)
51
- - `REDIS_PASSWORD`: Redis password (optional)
 
 
52
 
53
  ## Architecture
54
 
55
  This application consists of:
56
- - Streamlit frontend (`app.py`)
57
- - Core LLM abstraction (`core/llm.py`)
58
- - Memory management (`core/memory.py`)
59
- - Configuration management (`utils/config.py`)
60
- - API endpoints (in `api/` directory for future expansion)
 
 
 
 
 
61
 
62
  Built with Python, Streamlit, FastAPI, and Redis.
 
20
  - Multiple LLM provider support (Ollama, Hugging Face, OpenAI)
21
  - Dynamic model selection
22
  - Remote Ollama integration via ngrok
23
+ - Session management
24
+ - Weather information integration
25
+ - Text-to-speech capabilities
26
 
27
  ## How to Use
28
 
 
33
 
34
  ## Requirements
35
 
36
+ All requirements are specified in `requirements.txt`. The app automatically handles:
37
  - Streamlit UI
38
  - FastAPI backend (for future expansion)
39
  - Redis connection for persistent memory
40
  - Multiple LLM integrations
41
+ - Weather service integration
42
+ - Text-to-speech capabilities
43
 
44
  ## Environment Variables
45
 
46
+ Configure these in your Hugging Face Space secrets or local `.env` file:
47
 
48
+ - `OLLAMA_HOST`: Your Ollama server URL (default: ngrok URL)
49
+ - `LOCAL_MODEL_NAME`: Default model name (default: mistral)
50
+ - `HF_TOKEN`: Hugging Face API token (for Hugging Face models and TTS)
51
+ - `HF_API_ENDPOINT_URL`: Hugging Face inference API endpoint
52
+ - `USE_FALLBACK`: Whether to use fallback providers (true/false)
53
+ - `REDIS_HOST`: Redis server hostname (default: localhost)
54
+ - `REDIS_PORT`: Redis server port (default: 6379)
55
+ - `REDIS_USERNAME`: Redis username (optional)
56
+ - `REDIS_PASSWORD`: Redis password (optional)
57
+ - `OPENWEATHER_API_KEY`: OpenWeather API key (for weather features)
58
+ - `TAVILY_API_KEY`: Tavily API key (for web search)
59
 
60
  ## Architecture
61
 
62
  This application consists of:
63
+ - Streamlit frontend (`app.py`)
64
+ - Core LLM abstraction (`core/llm.py`)
65
+ - Memory management (`core/memory.py`)
66
+ - Session management (`core/session.py`)
67
+ - Configuration management (`utils/config.py`)
68
+ - API endpoints (in `api/` directory for future expansion)
69
+ - Services (`services/` directory):
70
+ - Weather service (`services/weather.py`)
71
+ - Text-to-speech service (`services/tts.py`)
72
+ - Ollama monitor (`services/ollama_monitor.py`)
73
 
74
  Built with Python, Streamlit, FastAPI, and Redis.
core/session.py CHANGED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import time
3
+ from typing import Dict, Any, Optional
4
+ from core.memory import load_user_state, save_user_state
5
+
6
class SessionManager:
    """Manages user sessions and conversation context"""

    def __init__(self, session_timeout: int = 3600):
        """
        Initialize session manager

        Args:
            session_timeout: Session timeout in seconds (default: 1 hour)
        """
        self.session_timeout = session_timeout

    def _expired(self, state: Dict[str, Any]) -> bool:
        # A session with no recorded activity (timestamp 0) counts as expired.
        return time.time() - state.get('last_activity', 0) > self.session_timeout

    def get_session(self, user_id: str) -> Dict[str, Any]:
        """
        Retrieve user session data

        Args:
            user_id: Unique identifier for the user

        Returns:
            Dictionary containing session data
        """
        try:
            state = load_user_state(user_id)
            # Reuse the stored state only when it exists and is still fresh;
            # otherwise hand back a brand-new session.
            if state and not self._expired(state):
                return state
            return self._create_new_session()
        except Exception as e:
            print(f"Error retrieving session for user {user_id}: {e}")
            return self._create_new_session()

    def update_session(self, user_id: str, data: Dict[str, Any]) -> bool:
        """
        Update user session data

        Args:
            user_id: Unique identifier for the user
            data: Data to update in the session

        Returns:
            Boolean indicating success
        """
        try:
            # Merge the new data over the current session; the activity
            # timestamp always wins so expiry tracking stays accurate.
            merged = {**self.get_session(user_id), **data, 'last_activity': time.time()}
            return save_user_state(user_id, merged)
        except Exception as e:
            print(f"Error updating session for user {user_id}: {e}")
            return False

    def clear_session(self, user_id: str) -> bool:
        """
        Clear user session data

        Args:
            user_id: Unique identifier for the user

        Returns:
            Boolean indicating success
        """
        try:
            # Persist an empty dict; get_session() mints a fresh session next time.
            return save_user_state(user_id, {})
        except Exception as e:
            print(f"Error clearing session for user {user_id}: {e}")
            return False

    def _create_new_session(self) -> Dict[str, Any]:
        """
        Create a new session with default values

        Returns:
            Dictionary containing new session data
        """
        return {
            'conversation': [],
            'preferences': {},
            'last_activity': time.time(),
            'created_at': time.time(),
        }

# Global session manager instance
session_manager = SessionManager()
requirements.txt CHANGED
@@ -7,3 +7,4 @@ openai==1.35.6
7
  tavily-python>=0.1.0,<1.0.0
8
  requests==2.31.0
9
  docker==6.1.3
 
 
7
  tavily-python>=0.1.0,<1.0.0
8
  requests==2.31.0
9
  docker==6.1.3
10
+ pygame==2.5.2
services/tts.py CHANGED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ from typing import Optional
4
+ from utils.config import config
5
+
6
class TTSService:
    """Service for converting text to speech via the Hugging Face Inference API."""

    # Seconds to wait for each Inference API call. The original code passed no
    # timeout, so an unresponsive API would hang the caller forever; generous
    # because hosted models can be slow to cold-start.
    REQUEST_TIMEOUT = 60

    def __init__(self):
        # NOTE(review): assumes `config` (from utils.config) exposes `hf_token`;
        # matches the original code's usage — confirm against utils/config.py.
        self.hf_token = config.hf_token
        self.tts_model = "facebook/fastspeech2-en-ljspeech"
        self.vocoder_model = "facebook/hifigan-universal"

    def synthesize_speech(self, text: str) -> Optional[bytes]:
        """
        Convert text to speech using Hugging Face API

        Args:
            text: Text to convert to speech

        Returns:
            Audio bytes or None if failed (best-effort: all errors are logged,
            never raised)
        """
        if not self.hf_token:
            print("Hugging Face token not configured for TTS")
            return None

        try:
            # First, generate speech with the text-to-speech model
            tts_headers = {
                "Authorization": f"Bearer {self.hf_token}"
            }

            tts_payload = {
                "inputs": text
            }

            tts_response = requests.post(
                f"https://api-inference.huggingface.co/models/{self.tts_model}",
                headers=tts_headers,
                json=tts_payload,
                timeout=self.REQUEST_TIMEOUT,
            )

            if tts_response.status_code != 200:
                print(f"TTS model error: {tts_response.status_code} - {tts_response.text}")
                return None

            # Then, convert the intermediate output to audio with the vocoder
            vocoder_response = requests.post(
                f"https://api-inference.huggingface.co/models/{self.vocoder_model}",
                headers=tts_headers,
                data=tts_response.content,
                timeout=self.REQUEST_TIMEOUT,
            )

            if vocoder_response.status_code == 200:
                return vocoder_response.content
            else:
                print(f"Vocoder error: {vocoder_response.status_code} - {vocoder_response.text}")
                return None

        except Exception as e:
            # Deliberate broad catch: TTS is an optional feature and must not
            # crash the app (covers network errors and timeouts alike).
            print(f"Error synthesizing speech: {e}")
            return None

    def save_audio_file(self, text: str, filename: str) -> bool:
        """
        Synthesize speech and save to file

        Args:
            text: Text to convert to speech
            filename: Output filename (.wav)

        Returns:
            Boolean indicating success
        """
        audio_data = self.synthesize_speech(text)
        if audio_data:
            try:
                with open(filename, 'wb') as f:
                    f.write(audio_data)
                return True
            except Exception as e:
                print(f"Error saving audio file: {e}")
                return False
        return False
86
+
87
+ # Global TTS service instance
88
+ tts_service = TTSService()
services/weather.py CHANGED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import os
3
+ from typing import Optional, Dict, Any
4
+ from utils.config import config
5
+
6
class WeatherService:
    """Service for fetching weather information from OpenWeatherMap."""

    def __init__(self):
        # NOTE(review): assumes `config` (from utils.config) exposes
        # `openweather_api_key`; matches the original code — confirm against
        # utils/config.py.
        self.api_key = config.openweather_api_key
        # HTTPS so the API key is not sent in clear text in the query string
        # (the original used http://).
        self.base_url = "https://api.openweathermap.org/data/2.5"

    def get_current_weather(self, city: str) -> Optional[Dict[str, Any]]:
        """
        Get current weather for a city

        Args:
            city: Name of the city

        Returns:
            Dictionary with weather information or None if failed (best-effort:
            all errors are logged, never raised)
        """
        if not self.api_key:
            print("OpenWeather API key not configured")
            return None

        try:
            params = {
                'q': city,
                'appid': self.api_key,
                'units': 'metric'  # Celsius
            }

            response = requests.get(
                f"{self.base_url}/weather",
                params=params,
                timeout=10
            )

            if response.status_code == 200:
                data = response.json()
                # Flatten the API payload to the handful of fields the app uses.
                return {
                    'city': data['name'],
                    'country': data['sys']['country'],
                    'temperature': data['main']['temp'],
                    'feels_like': data['main']['feels_like'],
                    'humidity': data['main']['humidity'],
                    'description': data['weather'][0]['description'],
                    'icon': data['weather'][0]['icon']
                }
            else:
                print(f"Weather API error: {response.status_code} - {response.text}")
                return None

        except Exception as e:
            # Deliberate broad catch: weather is an optional feature and must
            # not crash the app.
            print(f"Error fetching weather data: {e}")
            return None

    def get_forecast(self, city: str, days: int = 5) -> Optional[Dict[str, Any]]:
        """
        Get weather forecast for a city

        Args:
            city: Name of the city
            days: Number of days to forecast (default: 5, the API maximum)

        Returns:
            Dictionary with forecast information or None if failed (best-effort:
            all errors are logged, never raised)
        """
        if not self.api_key:
            print("OpenWeather API key not configured")
            return None

        try:
            # BUG FIX: the /forecast endpoint returns 3-hour steps, so `cnt`
            # counts timestamps, not days. The original passed cnt=days, which
            # for days=5 returned only ~15 hours of forecast. 8 steps per day,
            # capped at the API maximum of 40 timestamps (5 days).
            params = {
                'q': city,
                'appid': self.api_key,
                'units': 'metric',
                'cnt': min(days * 8, 40)
            }

            response = requests.get(
                f"{self.base_url}/forecast",
                params=params,
                timeout=10
            )

            if response.status_code == 200:
                data = response.json()
                forecasts = []

                for item in data['list']:
                    forecasts.append({
                        'datetime': item['dt_txt'],
                        'temperature': item['main']['temp'],
                        'description': item['weather'][0]['description'],
                        'icon': item['weather'][0]['icon']
                    })

                return {
                    'city': data['city']['name'],
                    'country': data['city']['country'],
                    'forecasts': forecasts
                }
            else:
                print(f"Forecast API error: {response.status_code} - {response.text}")
                return None

        except Exception as e:
            print(f"Error fetching forecast data: {e}")
            return None
112
+
113
+ # Global weather service instance
114
+ weather_service = WeatherService()