#!/usr/bin/env python3
"""
Vietnamese Sentiment Analysis - API Controller
Provides REST API endpoints for sentiment analysis using FastAPI
"""
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Optional
import uvicorn
import time
import logging
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Pydantic models for request/response
class TextInput(BaseModel):
    text: str
    language: Optional[str] = "vi"


class BatchTextInput(BaseModel):
    texts: List[str]
    language: Optional[str] = "vi"


class SentimentResult(BaseModel):
    sentiment: str
    confidence: float
    probabilities: dict
    processing_time: float
    text: str


class BatchSentimentResult(BaseModel):
    results: List[SentimentResult]
    total_texts: int
    sentiment_distribution: dict
    average_confidence: float
    total_processing_time: float


class HealthResponse(BaseModel):
    status: str
    model_loaded: bool
    memory_usage_mb: float
    timestamp: str


class ModelInfo(BaseModel):
    model_name: str
    architecture: str
    languages: List[str]
    labels: List[str]
    max_sequence_length: int
    memory_limit_mb: int
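

# Illustrative only: a successful /analyze response serializes the
# SentimentResult model above roughly as follows (values are made up):
#
#   {
#       "sentiment": "Positive",
#       "confidence": 0.92,
#       "probabilities": {"positive": 0.92, "neutral": 0.05, "negative": 0.03},
#       "processing_time": 0.041,
#       "text": "Sản phẩm này rất tốt!"
#   }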


class APIController:
    def __init__(self, sentiment_app):
        self.sentiment_app = sentiment_app
        self.app = FastAPI(
            title="Vietnamese Sentiment Analysis API",
            description="API for Vietnamese sentiment analysis using transformer models",
            version="1.0.0"
        )
        self.setup_cors()
        self.setup_routes()

    def setup_cors(self):
        """Setup CORS middleware for cross-origin requests"""
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],  # In production, specify allowed origins
            allow_credentials=True,
            allow_methods=["GET", "POST", "OPTIONS"],
            allow_headers=["*"],
        )
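
    # A minimal sketch of a tighter production configuration (the origin below
    # is a placeholder, not a real deployment URL):
    #
    #   self.app.add_middleware(
    #       CORSMiddleware,
    #       allow_origins=["https://your-frontend.example.com"],
    #       allow_credentials=True,
    #       allow_methods=["GET", "POST", "OPTIONS"],
    #       allow_headers=["Content-Type"],
    #   )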

    def setup_routes(self):
        """Setup API routes"""

        @self.app.get("/", response_model=dict)
        async def root():
            """Root endpoint"""
            return {
                "message": "Vietnamese Sentiment Analysis API",
                "version": "1.0.0",
                "endpoints": {
                    "health": "/health",
                    "model_info": "/model/info",
                    "analyze": "/analyze",
                    "analyze_batch": "/analyze/batch",
                    "docs": "/docs"
                }
            }

        @self.app.get("/health", response_model=HealthResponse)
        async def health_check():
            """Health check endpoint"""
            try:
                memory_usage = self.sentiment_app.get_memory_usage() if self.sentiment_app else 0
                return HealthResponse(
                    status="healthy",
                    model_loaded=self.sentiment_app.model_loaded if self.sentiment_app else False,
                    memory_usage_mb=memory_usage,
                    timestamp=time.strftime('%Y-%m-%d %H:%M:%S')
                )
            except Exception as e:
                logger.error(f"Health check failed: {e}")
                raise HTTPException(status_code=500, detail="Health check failed")

        @self.app.get("/model/info", response_model=ModelInfo)
        async def get_model_info():
            """Get model information"""
            if not self.sentiment_app:
                raise HTTPException(status_code=503, detail="Model not initialized")
            return ModelInfo(
                model_name=self.sentiment_app.model_name,
                architecture="Transformer-based sequence classification",
                languages=["Vietnamese"],
                labels=self.sentiment_app.sentiment_labels,
                max_sequence_length=512,
                memory_limit_mb=self.sentiment_app.max_memory_mb
            )

        @self.app.post("/analyze", response_model=SentimentResult)
        async def analyze_sentiment(input_data: TextInput):
            """Analyze sentiment of a single text"""
            if not self.sentiment_app or not self.sentiment_app.model_loaded:
                raise HTTPException(status_code=503, detail="Model not loaded")
            if not input_data.text.strip():
                raise HTTPException(status_code=400, detail="Text cannot be empty")
            try:
                start_time = time.time()
                # Get prediction from the sentiment app
                sentiment, output_text = self.sentiment_app.predict_sentiment(input_data.text)
                if not sentiment:
                    logger.error("Sentiment prediction returned None")
                    raise HTTPException(status_code=500, detail="Analysis failed - no sentiment returned")
                logger.info(f"Sentiment prediction: {sentiment}")
                logger.debug(f"Full output text: {output_text}")
                # Parse the output to extract probabilities
                probabilities = self._extract_probabilities(output_text)
                confidence = probabilities.get(sentiment.lower(), 0.0)
                logger.info(f"Extracted probabilities: {probabilities}")
                logger.info(f"Confidence for {sentiment}: {confidence}")
                processing_time = time.time() - start_time
                return SentimentResult(
                    sentiment=sentiment,
                    confidence=confidence,
                    probabilities=probabilities,
                    processing_time=processing_time,
                    text=input_data.text
                )
            except HTTPException:
                # Re-raise explicit HTTP errors instead of wrapping them in a generic 500
                raise
            except Exception as e:
                logger.error(f"Analysis failed: {e}")
                raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
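
        # Illustrative client call for this endpoint (assumes the server runs
        # locally on port 7860 and the `requests` package is installed):
        #
        #   import requests
        #   resp = requests.post(
        #       "http://localhost:7860/analyze",
        #       json={"text": "Sản phẩm này rất tốt!", "language": "vi"},
        #   )
        #   print(resp.json()["sentiment"], resp.json()["confidence"])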
@self.app.post("/analyze/batch", response_model=BatchSentimentResult)
async def analyze_batch_sentiment(input_data: BatchTextInput):
"""Analyze sentiment of multiple texts"""
if not self.sentiment_app or not self.sentiment_app.model_loaded:
raise HTTPException(status_code=503, detail="Model not loaded")
if not input_data.texts or not any(text.strip() for text in input_data.texts):
raise HTTPException(status_code=400, detail="At least one non-empty text is required")
if len(input_data.texts) > 10:
raise HTTPException(status_code=400, detail="Maximum 10 texts allowed per batch")
try:
start_time = time.time()
results = []
sentiment_distribution = {"Positive": 0, "Neutral": 0, "Negative": 0}
total_confidence = 0.0
# Process each text
for text in input_data.texts:
if not text.strip():
continue
text_start_time = time.time()
sentiment, output_text = self.sentiment_app.predict_sentiment(text.strip())
if sentiment:
probabilities = self._extract_probabilities(output_text)
confidence = probabilities.get(sentiment.lower(), 0.0)
result = SentimentResult(
sentiment=sentiment,
confidence=confidence,
probabilities=probabilities,
processing_time=time.time() - text_start_time,
text=text.strip()
)
results.append(result)
sentiment_distribution[sentiment] += 1
total_confidence += confidence
total_processing_time = time.time() - start_time
if not results:
raise HTTPException(status_code=500, detail="No valid analyses completed")
average_confidence = total_confidence / len(results)
return BatchSentimentResult(
results=results,
total_texts=len(results),
sentiment_distribution=sentiment_distribution,
average_confidence=average_confidence,
total_processing_time=total_processing_time
)
except Exception as e:
logger.error(f"Batch analysis failed: {e}")
raise HTTPException(status_code=500, detail=f"Batch analysis failed: {str(e)}")
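
        # Illustrative /analyze/batch request body (at most 10 texts are
        # accepted per call; the sentences are only samples):
        #
        #   {
        #       "texts": ["Sản phẩm này rất tốt!", "Dịch vụ quá tệ."],
        #       "language": "vi"
        #   }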
@self.app.post("/memory/cleanup")
async def cleanup_memory():
"""Manual memory cleanup endpoint"""
if not self.sentiment_app:
raise HTTPException(status_code=503, detail="App not initialized")
try:
self.sentiment_app.cleanup_memory()
memory_usage = self.sentiment_app.get_memory_usage()
return {
"message": "Memory cleanup completed",
"memory_usage_mb": memory_usage,
"timestamp": time.strftime('%Y-%m-%d %H:%M:%S')
}
except Exception as e:
logger.error(f"Memory cleanup failed: {e}")
raise HTTPException(status_code=500, detail="Memory cleanup failed")
def _extract_probabilities(self, output_text):
"""Extract probabilities from the formatted output text"""
probabilities = {"positive": 0.0, "neutral": 0.0, "negative": 0.0}
try:
lines = output_text.split('\n')
for line in lines:
# Look for lines with emojis and percentages
if '😠 **Negative:**' in line:
# Extract percentage from format: "😠 **Negative:** 25.50%"
parts = line.split('**Negative:**')[1].strip().rstrip('%')
probabilities["negative"] = float(parts) / 100
elif '😐 **Neutral:**' in line:
# Extract percentage from format: "😐 **Neutral:** 25.50%"
parts = line.split('**Neutral:**')[1].strip().rstrip('%')
probabilities["neutral"] = float(parts) / 100
elif '😊 **Positive:**' in line:
# Extract percentage from format: "😊 **Positive:** 25.50%"
parts = line.split('**Positive:**')[1].strip().rstrip('%')
probabilities["positive"] = float(parts) / 100
except Exception as e:
logger.warning(f"Failed to extract probabilities: {e}")
logger.debug(f"Output text was: {output_text}")
return probabilities
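
    # Example of the markdown-style output this parser expects, together with
    # the resulting dictionary (sample values only):
    #
    #   "😊 **Positive:** 92.00%\n😐 **Neutral:** 5.00%\n😠 **Negative:** 3.00%"
    #   -> {"positive": 0.92, "neutral": 0.05, "negative": 0.03}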

    def run(self, host="0.0.0.0", port=7860):
        """Run the API server"""
        logger.info(f"Starting API server on {host}:{port}")
        uvicorn.run(
            self.app,
            host=host,
            port=port,
            log_level="info"
        )


def create_api_controller(sentiment_app):
    """Create and return API controller instance"""
    return APIController(sentiment_app)
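

# A minimal testing sketch, assuming a loaded sentiment_app instance is
# available (illustrative only, not part of the runtime module):
#
#   from fastapi.testclient import TestClient
#
#   controller = create_api_controller(sentiment_app)
#   client = TestClient(controller.app)
#   print(client.get("/health").json())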


if __name__ == "__main__":
    # This allows running the API controller standalone for testing
    from app import SentimentGradioApp

    # Initialize the sentiment app
    sentiment_app = SentimentGradioApp()
    if not sentiment_app.load_model():
        print("❌ Failed to load model")
        exit(1)

    # Create and run API controller
    api_controller = create_api_controller(sentiment_app)
    api_controller.run()