File size: 6,249 Bytes
9db766f
28e04dd
03977cf
 
5fb4696
 
 
 
 
 
 
03977cf
 
 
 
 
 
5fb4696
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9db766f
 
 
 
 
 
 
 
 
dc6c29a
70a2026
f28285b
9db766f
 
 
 
 
 
 
 
f28285b
9db766f
 
 
 
 
 
 
 
 
 
 
70a2026
dc6c29a
5fb4696
9db766f
 
 
 
f28285b
 
 
 
5fb4696
f28285b
 
 
 
5fb4696
 
 
 
 
 
 
 
 
 
 
 
 
9db766f
5fb4696
9db766f
 
 
 
5fb4696
9db766f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fb4696
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9db766f
 
 
 
 
 
5fb4696
 
 
9db766f
 
 
 
 
 
 
5fb4696
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
"""Main FastAPI application entry point"""

import logging
import os
import shutil
import subprocess
import sys
from pathlib import Path

# Configure logging first so every later step (dependency checks, model
# loading) is visible in the Space's container logs.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Add the app directory to Python path to ensure imports work.
# The later flat imports (`from config import ...`, `from services import ...`)
# rely on this directory being importable regardless of the CWD uvicorn uses.
app_dir = Path(__file__).parent
if str(app_dir) not in sys.path:
    sys.path.insert(0, str(app_dir))

def install_ffmpeg():
    """Install ffmpeg on Hugging Face Spaces"""
    try:
        # Check if ffmpeg is already installed
        result = subprocess.run(["which", "ffmpeg"], capture_output=True, text=True)
        if result.returncode == 0:
            logger.info("βœ“ ffmpeg is already installed")
            # Verify it works
            version_result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
            if version_result.returncode == 0:
                logger.info(f"βœ“ ffmpeg version: {version_result.stdout.split()[2]}")
                return True
        
        logger.info("Installing ffmpeg...")
        # Update package list and install ffmpeg
        subprocess.run(["apt-get", "update"], check=True, capture_output=True)
        subprocess.run(["apt-get", "install", "-y", "ffmpeg"], check=True, capture_output=True)
        
        # Verify installation
        verify_result = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
        if verify_result.returncode == 0:
            logger.info(f"βœ“ ffmpeg installed successfully: {verify_result.stdout.split()[2]}")
            return True
        else:
            logger.error("βœ— ffmpeg installation verification failed")
            return False
            
    except Exception as e:
        logger.error(f"βœ— Failed to install ffmpeg: {e}")
        return False

# Install ffmpeg before importing other modules: the audio/STT services
# imported below may probe for it at import/startup time. A failed install
# is logged but does not abort startup (install_ffmpeg never raises).
logger.info("===== Checking system dependencies =====")
install_ffmpeg()

from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

from config import (
    API_TITLE,
    API_DESCRIPTION,
    API_VERSION,
    HUGGINGFACE_API_KEY,
    HUGGINGFACE_STANCE_MODEL_ID,
    HUGGINGFACE_LABEL_MODEL_ID,
    HOST,
    PORT,
    RELOAD,
    CORS_ORIGINS,
    CORS_CREDENTIALS,
    CORS_METHODS,
    CORS_HEADERS,
)
from services import stance_model_manager, kpa_model_manager
from routes import api_router


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan context: load models on startup, cleanup on shutdown.

    Each model group is loaded in its own try/except so one failure does not
    prevent the others (or the API itself) from coming up; failures are
    logged and the corresponding endpoints simply won't work.

    Args:
        app: The FastAPI application instance (unused directly, required by
            the lifespan protocol).

    Yields:
        None. The application serves requests while this generator is
        suspended at ``yield``.
    """
    # Startup: Load all models
    logger.info("Loading models on startup...")
    
    # Load stance detection model
    try:
        logger.info(f"Loading stance model from Hugging Face: {HUGGINGFACE_STANCE_MODEL_ID}")
        stance_model_manager.load_model(HUGGINGFACE_STANCE_MODEL_ID, HUGGINGFACE_API_KEY)
        logger.info("βœ“ Stance model loaded successfully")
    except Exception as e:
        logger.error(f"βœ— Failed to load stance model: {str(e)}")
        logger.error("⚠️  Stance detection endpoints will not work!")
    
    # Load KPA (label) model
    try:
        logger.info(f"Loading KPA model from Hugging Face: {HUGGINGFACE_LABEL_MODEL_ID}")
        kpa_model_manager.load_model(HUGGINGFACE_LABEL_MODEL_ID, HUGGINGFACE_API_KEY)
        logger.info("βœ“ KPA model loaded successfully")
    except Exception as e:
        logger.error(f"βœ— Failed to load KPA model: {str(e)}")
        logger.error("⚠️  KPA/Label prediction endpoints will not work!")
    
    # Load STT and Chatbot models. Imported lazily here (not at module top)
    # so a missing/broken audio stack only disables these endpoints.
    try:
        from services.stt_service import load_stt_model
        from services.chatbot_service import load_chatbot_model
        
        logger.info("Loading STT and Chatbot models...")
        load_stt_model()
        load_chatbot_model()
        logger.info("βœ“ STT and Chatbot models loaded successfully")
    except Exception as e:
        logger.error(f"βœ— Failed to load STT/Chatbot models: {str(e)}")
        logger.error("⚠️  Audio endpoints may not work properly!")
    
    logger.info("βœ“ API startup complete")
    # NOTE: plain string (was an f-string with no placeholders).
    logger.info("πŸ“š API Documentation: https://nlp-debater-project-fastapi-backend-models.hf.space/docs")
    
    yield  # Application runs here
    
    # Shutdown: Cleanup (if needed)
    logger.info("Shutting down API...")
    # Currently no cleanup needed, but you can add it here if necessary

# Create FastAPI application; `lifespan` above handles model load/unload.
app = FastAPI(
    title=API_TITLE,
    description=API_DESCRIPTION,
    version=API_VERSION,
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan,
)

# Add CORS middleware (origins/methods/headers come from config so the
# browser frontend on another host can call this API).
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ORIGINS,
    allow_credentials=CORS_CREDENTIALS,
    allow_methods=CORS_METHODS,
    allow_headers=CORS_HEADERS,
)

# Include API routes (all feature endpoints live under the shared router).
app.include_router(api_router)

# Health check endpoint
@app.get("/")
async def root():
    """Root health probe; also points callers to the API docs."""
    payload = {
        "message": "NLP Debater API is running!",
        "status": "healthy",
        "docs": "/docs",
    }
    return payload

@app.get("/health")
async def health_check():
    """Liveness probe for monitors and load balancers."""
    status_report = {
        "status": "healthy",
        "message": "API is running successfully",
    }
    return status_report

@app.get("/check-ffmpeg")
async def check_ffmpeg():
    """Report whether the ffmpeg binary is present and runnable."""
    try:
        probe = subprocess.run(["ffmpeg", "-version"], capture_output=True, text=True)
    except FileNotFoundError:
        # Binary absent from PATH entirely.
        return {"status": "ffmpeg not found"}

    # Binary exists but exited non-zero: surface its stderr for debugging.
    if probe.returncode != 0:
        return {"status": "error", "error": probe.stderr}

    return {
        "status": "available",
        "version": probe.stdout.split('\n')[0],
        "message": "ffmpeg is ready for audio processing",
    }


if __name__ == "__main__":
    # Direct launch (not via an external process manager): serve with uvicorn.
    # Local access: http://localhost:8000, docs at http://localhost:8000/docs.
    logger.info(f"πŸš€ Starting server on {HOST}:{PORT}")
    logger.info(f"πŸ“š Documentation available at: http://{HOST}:{PORT}/docs")

    uvicorn.run("main:app", host=HOST, port=PORT, reload=RELOAD, log_level="info")