from fastapi import FastAPI, APIRouter
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from src.apis.routes.user_route import router as router_user
from src.apis.routes.chat_route import router as router_chat
from src.apis.routes.lesson_route import router as router_lesson
from src.apis.routes.evaluation_route import router as router_evaluation
from src.apis.routes.pronunciation_route import router as router_pronunciation
from src.apis.routes.speaking_route import router as router_speaking, preload_whisper_model
from src.apis.routes.ipa_route import router as router_ipa
from loguru import logger
import time

api_router = APIRouter(prefix="/api")
api_router.include_router(router_user)
api_router.include_router(router_chat)
api_router.include_router(router_lesson)
api_router.include_router(router_evaluation)
api_router.include_router(router_pronunciation)
api_router.include_router(router_speaking)
api_router.include_router(router_ipa)


@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    FastAPI lifespan context manager for startup and shutdown events.
    Preloads the Whisper model during startup for faster first inference.
    """
    # Startup
    logger.info("Starting English Tutor API...")
    startup_start = time.time()
    try:
        # Preload Whisper model during startup
        logger.info("Preloading Whisper model for pronunciation assessment...")
        success = preload_whisper_model(whisper_model="base.en")
        if success:
            logger.info("Whisper model preloaded successfully!")
            logger.info("First pronunciation assessment will be much faster!")
        else:
            logger.warning("Failed to preload Whisper model, will load on first request")
    except Exception as e:
        logger.error(f"Error during Whisper preloading: {e}")
        logger.warning("Continuing without preload, model will load on first request")

    startup_time = time.time() - startup_start
    logger.info(f"English Tutor API startup completed in {startup_time:.2f}s")
    logger.info("API is ready to serve pronunciation assessments!")

    yield  # Application runs here

    # Shutdown
    logger.info("Shutting down English Tutor API...")


def create_app():
    app = FastAPI(
        docs_url="/",
        title="English Tutor API with Optimized Whisper",
        description="Pronunciation assessment API with preloaded Whisper for faster inference",
        version="2.1.0",
        lifespan=lifespan,  # Enable preloading during startup
    )
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Mount the aggregated /api router assembled at module level
    app.include_router(api_router)

    # Add health check endpoint for monitoring Whisper status
    @app.get("/health")
    async def health_check():
        """Health check endpoint that also verifies the Whisper model is loaded"""
        try:
            from src.apis.routes.speaking_route import global_assessor

            whisper_loaded = global_assessor is not None
            model_name = global_assessor.asr.whisper_model_name if whisper_loaded else None
            return {
                "status": "healthy",
                "whisper_preloaded": whisper_loaded,
                "whisper_model": model_name,
                "api_version": "2.1.0",
                "message": "English Tutor API is running"
                + (" with preloaded Whisper!" if whisper_loaded else ""),
            }
        except Exception as e:
            return {
                "status": "healthy",
                "whisper_preloaded": False,
                "error": str(e),
                "api_version": "2.1.0",
            }

    return app
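

# The file above never shows how the application is instantiated or launched.
# What follows is a minimal sketch, assuming the Space runs this module
# directly with uvicorn on Hugging Face's default port 7860; the module-level
# name `app` and the port are assumptions, not taken from the original source.
app = create_app()

if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)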