Run_code_api / app.py
"""
English Tutor API - Main Application
Optimized with Whisper model preloading for faster pronunciation assessment
"""
# Load environment variables before importing application modules
from dotenv import load_dotenv

load_dotenv()
from src.apis.create_app import create_app, api_router
import uvicorn
from loguru import logger
# Create FastAPI app with Whisper preloading
app = create_app()
app.include_router(api_router)
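
# --- Illustrative sketch only (not the actual src.apis.create_app implementation) ---
# A minimal example of the preloading pattern described above, under the assumption that
# create_app() loads Whisper in a FastAPI lifespan handler and stores it on app.state.
# The function names, the model size "base", and the attribute name "whisper_model" are
# hypothetical.
from contextlib import asynccontextmanager
from fastapi import FastAPI

@asynccontextmanager
async def _preload_whisper_sketch(app: FastAPI):
    import whisper  # assumption: openai-whisper; imported lazily so this sketch stays optional
    app.state.whisper_model = whisper.load_model("base")  # loaded once, before serving requests
    yield  # requests are handled with the model already in memory

def _sketch_create_app() -> FastAPI:
    # Equivalent in spirit to create_app(): the lifespan handler runs the preload at startup
    return FastAPI(title="English Tutor API", lifespan=_preload_whisper_sketch)
# --- End of illustrative sketch ---
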
# Add root endpoint
@app.get("/")
async def root():
    return {
        "message": "🎓 English Tutor API with Optimized Whisper",
        "status": "ready",
        "docs": "/docs",
        "health": "/health"
    }
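
# Illustrative only: how a request handler could reuse the preloaded model without reloading
# it per call. Assumes create_app() stored the model on app.state under the (hypothetical)
# attribute name "whisper_model"; the route path below is not part of the original API.
from fastapi import Request

@app.get("/whisper-status")
async def whisper_status(request: Request):
    # app.state is shared process-wide, so the model loaded at startup is reused here
    model = getattr(request.app.state, "whisper_model", None)
    return {"whisper_loaded": model is not None}
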
if __name__ == "__main__":
    logger.info("🚀 Starting English Tutor API server...")
    uvicorn.run(
        "app:app",
        host="0.0.0.0",
        port=8000,
        reload=False,  # keep False so autoreload does not discard the preloaded model
        log_level="info"
    )