production-ready caching a
- modules/__init__.py +2 -2
- modules/analyzer.py +11 -5
- modules/server_cache.py +38 -15
- modules/status_logger.py +25 -0
- requirements.txt +3 -1
modules/__init__.py
CHANGED
@@ -7,6 +7,6 @@ from .retriever import Retriever
from .analyzer import Analyzer
from .citation import CitationManager
from .formatter import OutputFormatter
-from .server_cache import
+from .server_cache import RedisServerStatusCache

-__all__ = ['InputHandler', 'Retriever', 'Analyzer', 'CitationManager', 'OutputFormatter', '
+__all__ = ['InputHandler', 'Retriever', 'Analyzer', 'CitationManager', 'OutputFormatter', 'RedisServerStatusCache']
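Both spellings of the import now resolve to the same class: the package re-export added here, and the direct module import that analyzer.py uses below. A minimal sketch, assuming only that the package imports cleanly from the project root:

# sketch: package-level and module-level imports point at the same class
from modules import RedisServerStatusCache
from modules.server_cache import RedisServerStatusCache as DirectImport
assert RedisServerStatusCache is DirectImport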
modules/analyzer.py
CHANGED
@@ -3,7 +3,8 @@ from openai import OpenAI
import requests
import time
import logging
-from modules.server_cache import
+from modules.server_cache import RedisServerStatusCache
+from modules.status_logger import log_server_status

class Analyzer:
    def __init__(self, base_url, api_key):
@@ -15,9 +16,12 @@ class Analyzer:
        self.headers = {"Authorization": f"Bearer {api_key}"}
        self.cache_key = f"server_status_{base_url}"

+        # Connect to Redis cache
+        self.cache = RedisServerStatusCache()
+
    def is_server_ready(self):
        # Check cache first
-        cached_status =
+        cached_status = self.cache.get(self.cache_key)
        if cached_status is not None:
            return cached_status

@@ -25,10 +29,12 @@ class Analyzer:
        try:
            response = requests.get(self.health_check_url, headers=self.headers, timeout=5)
            is_ready = response.status_code == 200
-
+            self.cache.set(self.cache_key, is_ready)
+            log_server_status(self.cache_key, is_ready)
            return is_ready
-        except requests.exceptions.RequestException:
-
+        except requests.exceptions.RequestException as e:
+            self.cache.set(self.cache_key, False)
+            log_server_status(self.cache_key, False)
            return False

    def wait_for_server(self, timeout=180, interval=10):
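The change turns is_server_ready() into a cache-aside health check: a fresh cache entry answers immediately, and a miss triggers one HTTP probe whose result is written back to the cache and logged. A minimal usage sketch; the base URL and API key below are placeholders, not values from this repo:

# sketch: placeholder endpoint and key, not taken from this repo's config
from modules.analyzer import Analyzer

analyzer = Analyzer(base_url="http://localhost:8000/v1", api_key="example-key")

if analyzer.is_server_ready():        # answered from the cache while the entry is fresh
    print("backend is up")
else:
    analyzer.wait_for_server(timeout=180, interval=10)   # otherwise poll until ready or timeout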
modules/server_cache.py
CHANGED
@@ -1,22 +1,45 @@
# modules/server_cache.py
import time
+import redis
+import json

-class
-    def __init__(self,
-
-
+class RedisServerStatusCache:
+    def __init__(self, host='localhost', port=6379, db=0, default_ttl=300):
+        try:
+            self.redis = redis.StrictRedis(host=host, port=port, db=db, decode_responses=True)
+            # Test connection
+            self.redis.ping()
+        except redis.ConnectionError:
+            # Fallback to in-memory cache if Redis is not available
+            self.redis = None
+            self.fallback_cache = {}
+        self.default_ttl = default_ttl

    def get(self, server_key):
-
-
-
-        return status
+        try:
+            if self.redis:
+                status = self.redis.get(f"server_status:{server_key}")
+                return json.loads(status) if status else None
            else:
+                # Fallback to in-memory cache
+                entry = self.fallback_cache.get(server_key)
+                if entry:
+                    timestamp, status = entry
+                    if time.time() - timestamp < self.default_ttl:
+                        return status
+                    else:
+                        del self.fallback_cache[server_key]
+                return None
+        except Exception:
+            return None

-    def set(self, server_key, status):
+    def set(self, server_key, status, ttl=None):
+        try:
+            ttl = ttl or self.default_ttl
+            if self.redis:
+                self.redis.setex(f"server_status:{server_key}", ttl, json.dumps(status))
+            else:
+                # Fallback to in-memory cache
+                self.fallback_cache[server_key] = (time.time(), status)
+        except Exception:
+            pass  # Silently fail if cache is not available
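A small standalone sketch of the fallback behaviour: if nothing is listening on localhost:6379, the constructor silently switches to the in-memory dict and get()/set() keep the same interface. The key below is a placeholder:

# sketch: exercising RedisServerStatusCache directly; "demo_server" is a placeholder key
from modules.server_cache import RedisServerStatusCache

cache = RedisServerStatusCache(default_ttl=60)   # shorter TTL than the 300 s default
cache.set("demo_server", True)
print(cache.get("demo_server"))                  # True until the entry expires

# With Redis available the value is stored under "server_status:demo_server" via SETEX;
# without it, the (timestamp, status) pair sits in cache.fallback_cache and get()
# compares the stored timestamp against default_ttl.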
modules/status_logger.py
ADDED
@@ -0,0 +1,25 @@
+import logging
+import os
+from datetime import datetime
+
+STATUS_LOG_FILE = "server_status_log.csv"
+
+# Set up logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+def log_server_status(server_key, status):
+    timestamp = datetime.now().isoformat()
+    status_str = "UP" if status else "DOWN"
+
+    # Log to console
+    logging.info(f"Server {server_key} is {status_str}")
+
+    # Log to file
+    try:
+        file_exists = os.path.exists(STATUS_LOG_FILE)
+        with open(STATUS_LOG_FILE, 'a') as f:
+            if not file_exists:
+                f.write("timestamp,server,status\n")
+            f.write(f"{timestamp},{server_key},{status_str}\n")
+    except Exception as e:
+        logging.error(f"Failed to log server status: {e}")
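Each call to the new helper emits one console line and appends one CSV row, writing the header on first use. A sketch with a placeholder key:

# sketch: "demo_server" is a placeholder key
from modules.status_logger import log_server_status

log_server_status("demo_server", True)
# console:              <timestamp> - INFO - Server demo_server is UP
# server_status_log.csv: <ISO timestamp>,demo_server,UP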
requirements.txt
CHANGED
@@ -1,5 +1,7 @@
+# requirements.txt
gradio>=3.40.0
tavily-python>=0.3.0
openai>=1.0.0
requests>=2.31.0
-pytest==8.3.3
+pytest==8.3.3
+redis>=4.0.0