malek-messaoudii committed on
Commit
8791d59
·
1 Parent(s): a71355d

add mcp part

Browse files
main.py CHANGED
@@ -175,6 +175,28 @@ except ImportError as e:
175
  except Exception as e:
176
  logger.warning(f"⚠ Failed loading Voice Chat route: {e}")
177
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  # --- Basic routes ---
179
  @app.get("/health", tags=["Health"])
180
  async def health():
 
175
  except Exception as e:
176
  logger.warning(f"⚠ Failed loading Voice Chat route: {e}")
177
 
178
# MCP integration (placed after the other imports).
try:
    from services.mcp_service import init_mcp_server
    from routes.mcp_routes import router as mcp_router
    MCP_ENABLED = True
except ImportError as e:
    logger.warning(f"⚠ MCP not available: {e}")
    MCP_ENABLED = False

# NOTE(review): the original comment said this belongs in the lifespan
# manager after model loading, but the call actually runs here at module
# import time — confirm that is intended.
if MCP_ENABLED:
    try:
        init_mcp_server(app)
        logger.info("✓ MCP Server initialized")
    except Exception as e:
        logger.error(f"✗ MCP initialization failed: {e}")

# Mount the MCP routes after the other routers.
if MCP_ENABLED:
    app.include_router(mcp_router)
    logger.info("✓ MCP routes loaded")
199
+
200
  # --- Basic routes ---
201
  @app.get("/health", tags=["Health"])
202
  async def health():
mcp/resources.py ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict, Any
2
+ from mcp import Resource
3
+ from datetime import datetime
4
+ import logging
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
class ModelResource(Resource):
    """Base MCP resource describing a model's metadata and status."""

    def __init__(self, model_name: str, model_info: Dict[str, Any]):
        # Keep the raw info so get_content() can serialize it on demand.
        self.model_name = model_name
        self.model_info = model_info
        super().__init__(
            uri=f"model://{model_name}",
            name=model_name,
            description=f"{model_name} model information and status",
            mime_type="application/json"
        )

    async def get_content(self) -> str:
        """Serialize the stored model info, plus timestamp and URI, to JSON."""
        import json

        payload = dict(self.model_info)
        payload["timestamp"] = datetime.now().isoformat()
        payload["uri"] = self.uri
        return json.dumps(payload)
28
+
29
class StanceDetectionResource(ModelResource):
    """Resource for stance detection model"""
    def __init__(self):
        # Local import — presumably to avoid import-time circular
        # dependencies with the services package; TODO confirm.
        from services.stance_model_manager import stance_model_manager

        # Static description of the model's contract.
        model_info = {
            "type": "stance_detection",
            "description": "Detects PRO/CON stance for topic-argument pairs",
            "capabilities": ["single_prediction", "batch_prediction"],
            "input_format": {"topic": "string", "argument": "string"},
            "output_format": {
                "predicted_stance": "PRO/CON",
                "confidence": "float",
                "probabilities": {"PRO": "float", "CON": "float"}
            }
        }

        # Runtime state: only report device/model_id when a manager exists
        # and its model finished loading.
        if stance_model_manager and stance_model_manager.model_loaded:
            model_info.update({
                "loaded": True,
                "device": str(stance_model_manager.device),
                "model_id": getattr(stance_model_manager, 'model_id', 'unknown')
            })
        else:
            model_info["loaded"] = False

        super().__init__("stance_detection", model_info)
56
+
57
class KPAResource(ModelResource):
    """Resource for Keypoint-Argument matching model"""
    def __init__(self):
        # Local import — presumably to avoid circular imports; TODO confirm.
        from services.label_model_manager import kpa_model_manager

        # Static description of the model's contract.
        model_info = {
            "type": "keypoint_argument_matching",
            "description": "Matches arguments with key points (apparie/non_apparie)",
            "capabilities": ["single_prediction", "batch_prediction"],
            "input_format": {"argument": "string", "key_point": "string"},
            "output_format": {
                "prediction": "0/1",
                "label": "apparie/non_apparie",
                "confidence": "float",
                "probabilities": {"non_apparie": "float", "apparie": "float"}
            }
        }

        # Runtime state, reported only when the model is actually loaded.
        if kpa_model_manager and kpa_model_manager.model_loaded:
            model_info.update({
                "loaded": True,
                "device": str(kpa_model_manager.device),
                "model_id": getattr(kpa_model_manager, 'model_id', 'unknown'),
                "max_length": getattr(kpa_model_manager, 'max_length', 256)
            })
        else:
            model_info["loaded"] = False

        super().__init__("kpa_matching", model_info)
86
+
87
class STTResource(Resource):
    """Resource for Speech-to-Text capabilities"""
    def __init__(self):
        # Local import keeps config access at construction time.
        from config import GROQ_API_KEY, GROQ_STT_MODEL

        super().__init__(
            uri="service://speech-to-text",
            name="speech_to_text",
            description="Speech-to-Text service using Groq Whisper API",
            mime_type="application/json"
        )

        # "enabled" mirrors whether a Groq API key is configured.
        self.config = {
            "provider": "Groq",
            "model": GROQ_STT_MODEL,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "max_audio_size": "10MB",
            "supported_formats": ["wav", "mp3", "m4a", "mp4"]
        }

    async def get_content(self) -> str:
        """Get STT service information"""
        import json
        # Static config plus a fresh timestamp on every read.
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })
116
+
117
class TTSResource(Resource):
    """Resource for Text-to-Speech capabilities"""
    def __init__(self):
        # Local import keeps config access at construction time.
        from config import GROQ_API_KEY, GROQ_TTS_MODEL, GROQ_TTS_VOICE

        super().__init__(
            uri="service://text-to-speech",
            name="text_to_speech",
            description="Text-to-Speech service using Groq PlayAI TTS",
            mime_type="application/json"
        )

        # "enabled" mirrors whether a Groq API key is configured.
        self.config = {
            "provider": "Groq",
            "model": GROQ_TTS_MODEL,
            "voice": GROQ_TTS_VOICE,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "format": "wav/mp3",
            "voices_available": ["Aaliyah-PlayAI", "Aria-PlayAI", "Dexter-PlayAI", "Fiona-PlayAI"]
        }

    async def get_content(self) -> str:
        """Get TTS service information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })
147
+
148
class ChatbotResource(Resource):
    """Resource for Chatbot capabilities"""
    def __init__(self):
        # Local import keeps config access at construction time.
        from config import GROQ_API_KEY, GROQ_CHAT_MODEL

        super().__init__(
            uri="service://chatbot",
            name="chatbot",
            description="Chatbot service using Groq LLM API",
            mime_type="application/json"
        )

        # "enabled" mirrors whether a Groq API key is configured.
        # max_context_length is a hard-coded assumption here — confirm it
        # matches the configured Groq chat model.
        self.config = {
            "provider": "Groq",
            "model": GROQ_CHAT_MODEL,
            "enabled": bool(GROQ_API_KEY),
            "language": "English only",
            "features": ["conversation", "context_awareness", "voice_chat"],
            "max_context_length": 8192
        }

    async def get_content(self) -> str:
        """Get chatbot service information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })
177
+
178
class ArgumentGenerationResource(Resource):
    """Resource for Argument Generation model (placeholder — to be completed)"""
    def __init__(self):
        super().__init__(
            uri="model://argument-generation",
            name="argument_generation",
            description="Persuasive argument generation model",
            mime_type="application/json"
        )

        # Deliberately static: the real model is not implemented yet.
        self.config = {
            "type": "argument_generation",
            "status": "not_implemented",
            "description": "TODO: Implement your argument generation model",
            "planned_capabilities": [
                "single_argument_generation",
                "batch_generation",
                "stance_controlled_generation",
                "counter_argument_generation"
            ]
        }

    async def get_content(self) -> str:
        """Get argument generation model information"""
        import json
        return json.dumps({
            **self.config,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri,
            "note": "This is a placeholder. Implement your model in services/argument_generation.py"
        })
209
+
210
class SystemHealthResource(Resource):
    """Resource reporting system health: model load state and service availability."""

    def __init__(self):
        super().__init__(
            uri="system://health",
            name="system_health",
            description="System health and service status",
            mime_type="application/json"
        )

    async def get_content(self) -> str:
        """Return a JSON health report.

        Includes per-model loaded flags, per-service enabled flags
        (all Groq-backed services are "up" exactly when an API key is
        configured), and version strings.
        """
        import json
        from datetime import datetime

        # Probe each model manager independently so one missing/broken
        # manager does not hide the status of the other.
        model_status = {}
        try:
            from services.stance_model_manager import stance_model_manager
            model_status["stance_detection"] = {
                "loaded": stance_model_manager.model_loaded if stance_model_manager else False
            }
        # Fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        except Exception:
            model_status["stance_detection"] = {"loaded": False}

        try:
            from services.label_model_manager import kpa_model_manager
            model_status["kpa_matching"] = {
                "loaded": kpa_model_manager.model_loaded if kpa_model_manager else False
            }
        except Exception:
            model_status["kpa_matching"] = {"loaded": False}

        # Service status: availability is derived solely from key presence.
        from config import GROQ_API_KEY
        service_status = {
            "stt": bool(GROQ_API_KEY),
            "tts": bool(GROQ_API_KEY),
            "chatbot": bool(GROQ_API_KEY),
            "argument_generation": False  # to be implemented
        }

        return json.dumps({
            "timestamp": datetime.now().isoformat(),
            "status": "operational",
            "models": model_status,
            "services": service_status,
            "api_version": "1.0.0",
            "mcp_version": "1.0.0"
        })
260
+
261
class APIDocumentationResource(Resource):
    """Resource for API documentation"""
    def __init__(self):
        super().__init__(
            uri="documentation://api",
            name="api_documentation",
            description="API endpoints documentation",
            mime_type="application/json"
        )

        # Static, hand-maintained endpoint map. NOTE(review): keep this in
        # sync with the actual routers — it is not generated from the app.
        self.documentation = {
            "endpoints": {
                "mcp": {
                    "/mcp/health": "GET - Health check",
                    "/mcp/resources": "GET - List all resources",
                    "/mcp/tools": "GET - List all tools",
                    "/mcp/tools/call": "POST - Call a tool"
                },
                "models": {
                    "/api/v1/kpa/predict": "POST - KPA prediction",
                    "/api/v1/stance/predict": "POST - Stance prediction",
                    "/api/v1/stance/batch-predict": "POST - Batch stance prediction"
                },
                "voice": {
                    "/api/v1/stt/": "POST - Speech to text",
                    "/api/v1/tts/": "POST - Text to speech",
                    "/voice-chat/voice": "POST - Voice chat",
                    "/voice-chat/text": "POST - Text chat"
                }
            },
            "authentication": "Currently none (add JWT or API key based auth)",
            "rate_limits": "None configured",
            "version": "2.0.0"
        }

    async def get_content(self) -> str:
        """Get API documentation"""
        import json
        return json.dumps({
            **self.documentation,
            "timestamp": datetime.now().isoformat(),
            "uri": self.uri
        })
304
+
305
def get_resources() -> List[Resource]:
    """Return all available MCP resources.

    Each resource is constructed independently; a failure to build one
    (e.g. a missing model manager) is logged and skipped so the remaining
    resources are still exposed.
    """
    # Data-driven construction replaces eight identical try/except blocks.
    resource_classes = (
        StanceDetectionResource,
        KPAResource,
        STTResource,
        TTSResource,
        ChatbotResource,
        ArgumentGenerationResource,
        SystemHealthResource,
        APIDocumentationResource,
    )

    resources: List[Resource] = []
    for cls in resource_classes:
        try:
            resources.append(cls())
        except Exception as e:
            # cls.__name__ reproduces the original per-class log messages.
            logger.warning(f"Failed to create {cls.__name__}: {e}")

    logger.info(f"Created {len(resources)} MCP resources")
    return resources
mcp/run_mcp.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Standalone entry point for the MCP server.
# NOTE(review): `run_mcp` is not defined in mcp/server.py as shown — confirm
# the import target actually exports it; also the absolute `server` import
# only resolves when this script is run from inside the mcp/ directory.
from server import run_mcp

if __name__ == "__main__":
    run_mcp()
mcp/server.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, List, Optional
2
+ import json
3
+ from fastapi import FastAPI, HTTPException
4
+ from mcp import Server, Resource, Tool
5
+ from mcp.types import TextContent, ImageContent
6
+ import logging
7
+ from .resources import get_resources
8
+
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
class MCPServer:
    """Wraps an `mcp.Server`, registering resources/tools and exposing
    list/call operations for the FastAPI layer."""

    def __init__(self, app: FastAPI):
        self.app = app
        # NOTE(review): `Server()` is constructed without arguments, and the
        # code below relies on `add_resource`/`add_tool` and dict-like
        # `.resources`/`.tools` attributes — confirm this matches the
        # installed `mcp` package's API.
        self.server = Server()
        self._setup_resources()
        self._setup_tools()

    def _setup_resources(self):
        """Register the resources exposed via MCP."""

        resources = get_resources()
        for resource in resources:
            self.server.add_resource(resource)

    def _setup_tools(self):
        """Register the tools exposed via MCP."""
        # Local import to avoid a circular import with mcp.tools.
        from .tools import get_tools

        tools = get_tools()
        for tool in tools:
            self.server.add_tool(tool)

    async def list_resources(self) -> List[dict]:
        """List all available resources as plain dicts (for JSON responses)."""
        return [
            {
                "uri": resource.uri,
                "name": resource.name,
                "description": resource.description,
                "mime_type": resource.mime_type
            }
            for resource in self.server.resources.values()
        ]

    async def list_tools(self) -> List[dict]:
        """List all available tools as plain dicts (name, description, schema)."""
        return [
            {
                "name": tool.name,
                "description": tool.description,
                "input_schema": tool.input_schema
            }
            for tool in self.server.tools.values()
        ]

    async def call_tool(self, tool_name: str, arguments: dict) -> Any:
        """Execute an MCP tool by name.

        Raises HTTPException 404 for an unknown tool and 500 when the
        tool's own execution fails.
        """
        if tool_name not in self.server.tools:
            raise HTTPException(status_code=404, detail=f"Tool {tool_name} not found")

        try:
            tool = self.server.tools[tool_name]
            result = await tool.execute(arguments)
            return result
        except Exception as e:
            logger.error(f"Tool execution error: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Tool execution failed: {str(e)}")
mcp/tools.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any, List
2
+ from mcp import Tool
3
+ import logging
4
+ from services import (
5
+ kpa_model_manager,
6
+ stance_model_manager,
7
+ chat_service
8
+ )
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
async def predict_kpa_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Predict whether an argument matches a key point.

    Reads `argument` and `key_point` from `arguments`; returns the model's
    prediction fields, or {"error": ...} on invalid input or failure.
    """
    try:
        argument = arguments.get("argument", "")
        key_point = arguments.get("key_point", "")

        if not (argument and key_point):
            return {"error": "Both argument and key_point are required"}

        outcome = kpa_model_manager.predict(argument, key_point)
        return {
            field: outcome[field]
            for field in ("prediction", "label", "confidence", "probabilities")
        }
    except Exception as e:
        logger.error(f"KPA tool error: {str(e)}")
        return {"error": str(e)}
31
+
32
async def predict_stance_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Predict the PRO/CON stance of an argument toward a topic.

    Reads `topic` and `argument` from `arguments`; returns the stance
    fields, or {"error": ...} on invalid input or failure.
    """
    try:
        topic = arguments.get("topic", "")
        argument = arguments.get("argument", "")

        if not (topic and argument):
            return {"error": "Both topic and argument are required"}

        outcome = stance_model_manager.predict(topic, argument)
        return {
            field: outcome[field]
            for field in (
                "predicted_stance",
                "confidence",
                "probability_con",
                "probability_pro",
            )
        }
    except Exception as e:
        logger.error(f"Stance tool error: {str(e)}")
        return {"error": str(e)}
51
+
52
async def batch_stance_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Run stance prediction over a list of {topic, argument} items.

    Returns the per-item results plus a count, or {"error": ...} when the
    list is missing/empty or any prediction fails.
    """
    try:
        items = arguments.get("items", [])

        if not items:
            return {"error": "Items list is required"}

        results = [
            {
                "topic": item["topic"],
                "argument": item["argument"],
                **stance_model_manager.predict(item["topic"], item["argument"]),
            }
            for item in items
        ]

        return {
            "results": results,
            "total_processed": len(results)
        }
    except Exception as e:
        logger.error(f"Batch stance tool error: {str(e)}")
        return {"error": str(e)}
76
+
77
async def generate_argument_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Tool for argument generation (placeholder — dedicated model pending).

    NOTE(review): the tool schema declares a `stance` argument, but it is
    never read here — confirm whether it should influence the prompt.
    """
    try:
        prompt = arguments.get("prompt", "")
        context = arguments.get("context", "")

        if not prompt:
            return {"error": "Prompt is required"}

        # TODO: integrate the dedicated argument-generation model here.
        # For now, delegate to the generic chat service as a placeholder.
        from services.chat_service import generate_chat_response

        response = generate_chat_response(
            user_input=f"Generate argument for: {prompt}. Context: {context}",
            system_prompt="You are an argument generation assistant. Generate persuasive arguments based on the given prompt and context."
        )

        return {
            "generated_argument": response,
            "prompt": prompt,
            "context": context
        }
    except Exception as e:
        logger.error(f"Argument generation tool error: {str(e)}")
        return {"error": str(e)}
103
+
104
async def voice_chat_tool(arguments: Dict[str, Any]) -> Dict[str, Any]:
    """Chat tool with (future) voice output.

    Args (in `arguments`):
        text: required user input.
        conversation_id: optional conversation id for context continuity.
        tts: optional bool requesting an audio response (not wired up yet).

    Returns the assistant response plus audio fields, or {"error": ...}.
    """
    try:
        text = arguments.get("text", "")
        conversation_id = arguments.get("conversation_id", "")

        if not text:
            return {"error": "Text input is required"}

        # Reuse the existing chat service.
        from services.chat_service import generate_chat_response

        response = generate_chat_response(
            user_input=text,
            conversation_id=conversation_id if conversation_id else None
        )

        # Optional TTS: synthesis is not implemented yet, so no audio URL
        # is ever produced.
        tts_required = arguments.get("tts", False)
        audio_url = None

        if tts_required:
            from services.tts_service import text_to_speech  # noqa: F401
            # TODO: call text_to_speech, store the audio, set audio_url.

        return {
            "response": response,
            "conversation_id": conversation_id,
            # Bug fix: previously reported has_audio=True whenever TTS was
            # requested even though audio_url was always None.
            "has_audio": audio_url is not None,
            "audio_url": audio_url
        }
    except Exception as e:
        logger.error(f"Voice chat tool error: {str(e)}")
        return {"error": str(e)}
138
+
139
def get_tools() -> List[Tool]:
    """Return all available MCP tools with their JSON-schema input contracts."""
    return [
        # Single keypoint-argument matching.
        Tool(
            name="predict_kpa",
            description="Predict keypoint-argument matching for a single pair",
            input_schema={
                "type": "object",
                "properties": {
                    "argument": {"type": "string", "description": "The argument text"},
                    "key_point": {"type": "string", "description": "The key point to evaluate"}
                },
                "required": ["argument", "key_point"]
            },
            execute=predict_kpa_tool
        ),
        # Single stance classification.
        Tool(
            name="predict_stance",
            description="Predict stance for a topic-argument pair",
            input_schema={
                "type": "object",
                "properties": {
                    "topic": {"type": "string", "description": "The debate topic"},
                    "argument": {"type": "string", "description": "The argument to classify"}
                },
                "required": ["topic", "argument"]
            },
            execute=predict_stance_tool
        ),
        # Batch stance classification over a list of pairs.
        Tool(
            name="batch_predict_stance",
            description="Predict stance for multiple topic-argument pairs",
            input_schema={
                "type": "object",
                "properties": {
                    "items": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "properties": {
                                "topic": {"type": "string"},
                                "argument": {"type": "string"}
                            },
                            "required": ["topic", "argument"]
                        },
                        "description": "List of topic-argument pairs"
                    }
                },
                "required": ["items"]
            },
            execute=batch_stance_tool
        ),
        # Argument generation (currently backed by the chat service).
        Tool(
            name="generate_argument",
            description="Generate persuasive arguments based on prompt and context",
            input_schema={
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "Main topic or question"},
                    "context": {"type": "string", "description": "Additional context"},
                    "stance": {
                        "type": "string",
                        "enum": ["pro", "con", "neutral"],
                        "description": "Desired stance"
                    }
                },
                "required": ["prompt"]
            },
            execute=generate_argument_tool
        ),
        # Text chat with optional (not yet implemented) TTS output.
        Tool(
            name="voice_chat",
            description="Chat with voice assistant capabilities",
            input_schema={
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text input"},
                    "conversation_id": {"type": "string", "description": "Conversation ID for context"},
                    "tts": {"type": "boolean", "description": "Generate audio response"}
                },
                "required": ["text"]
            },
            execute=voice_chat_tool
        )
    ]
mcp/types.py ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Type definitions for MCP (Model Context Protocol)
3
+ """
4
+ from typing import Dict, Any, List, Optional, Union, TypedDict
5
+ from enum import Enum
6
+ from datetime import datetime
7
+ from pydantic import BaseModel, Field
8
+
9
# ==================== ENUMS ====================

class ModelType(str, Enum):
    """Types of models available"""
    STANCE_DETECTION = "stance_detection"
    KPA_MATCHING = "kpa_matching"
    ARGUMENT_GENERATION = "argument_generation"
    CHATBOT = "chatbot"

class StanceType(str, Enum):
    """Stance types"""
    PRO = "PRO"
    CON = "CON"
    NEUTRAL = "NEUTRAL"

class KpaLabel(str, Enum):
    """KPA matching labels (French: apparie = matched)"""
    APPARIE = "apparie"
    NON_APPARIE = "non_apparie"

class ServiceStatus(str, Enum):
    """Service status"""
    OPERATIONAL = "operational"
    DEGRADED = "degraded"
    MAINTENANCE = "maintenance"
    OFFLINE = "offline"

class ToolCategory(str, Enum):
    """Tool categories"""
    PREDICTION = "prediction"
    GENERATION = "generation"
    TRANSFORMATION = "transformation"
    ANALYSIS = "analysis"
    UTILITY = "utility"

# ==================== CORE TYPES ====================
# TypedDicts are plain dict shapes (no validation) — kept for callers that
# want lightweight typing without pydantic instances.

class ResourceMetadata(TypedDict):
    """Metadata for a resource"""
    uri: str
    name: str
    description: Optional[str]
    mime_type: str
    created_at: datetime
    updated_at: datetime
    tags: List[str]

class ToolMetadata(TypedDict):
    """Metadata for a tool"""
    name: str
    description: str
    version: str
    category: ToolCategory
    input_schema: Dict[str, Any]
    output_schema: Dict[str, Any]
    rate_limit: Optional[int]
    requires_auth: bool

class ModelMetadata(TypedDict):
    """Metadata for a model"""
    model_id: str
    model_type: ModelType
    provider: str
    version: str
    description: str
    capabilities: List[str]
    parameters: Dict[str, Any]
    hardware_requirements: Dict[str, Any]
77
+
78
# ==================== PREDICTION TYPES ====================

class PredictionInput(BaseModel):
    """Base class for prediction inputs"""
    # None means "use the default model for this task".
    model_id: Optional[str] = Field(None, description="Specific model to use")

class StancePredictionInput(PredictionInput):
    """Input for stance prediction"""
    topic: str = Field(..., min_length=5, max_length=500, description="Debate topic")
    argument: str = Field(..., min_length=5, max_length=1000, description="Argument text")

    class Config:
        json_schema_extra = {
            "example": {
                "topic": "Climate change is the most pressing issue of our time",
                "argument": "Renewable energy investments have created millions of jobs worldwide"
            }
        }

class KPAPredictionInput(PredictionInput):
    """Input for KPA prediction"""
    argument: str = Field(..., description="Argument text")
    key_point: str = Field(..., description="Key point to match")

    class Config:
        json_schema_extra = {
            "example": {
                "argument": "Renewable energy is cost-effective in the long term",
                "key_point": "Economic benefits of green energy"
            }
        }

class BatchPredictionInput(BaseModel):
    """Input for batch predictions"""
    items: List[Union[StancePredictionInput, KPAPredictionInput]]
    batch_size: Optional[int] = Field(10, ge=1, le=100)
    parallel: bool = Field(False, description="Process in parallel")

# ==================== GENERATION TYPES ====================

class ArgumentGenerationInput(BaseModel):
    """Input for argument generation"""
    prompt: str = Field(..., description="Main topic or question")
    context: Optional[str] = Field(None, description="Additional context")
    stance: Optional[StanceType] = Field(StanceType.NEUTRAL, description="Desired stance")
    length: Optional[str] = Field("medium", description="Argument length: short/medium/long")
    style: Optional[str] = Field("persuasive", description="Writing style")
    num_arguments: Optional[int] = Field(1, ge=1, le=5, description="Number of arguments to generate")

    class Config:
        json_schema_extra = {
            "example": {
                "prompt": "Should artificial intelligence be regulated?",
                "stance": "PRO",
                "context": "Focus on ethical considerations",
                "length": "medium"
            }
        }

class CounterArgumentInput(BaseModel):
    """Input for counter-argument generation"""
    original_argument: str = Field(..., description="Original argument to counter")
    target_stance: StanceType = Field(..., description="Stance for counter-argument")
    context: Optional[str] = Field(None, description="Additional context")

    class Config:
        json_schema_extra = {
            "example": {
                "original_argument": "AI regulation stifles innovation",
                "target_stance": "CON",
                "context": "Focus on safety and ethics"
            }
        }
151
+
152
# ==================== VOICE TYPES ====================

class AudioFormat(str, Enum):
    """Supported audio formats"""
    WAV = "wav"
    MP3 = "mp3"
    M4A = "m4a"
    OGG = "ogg"

class VoiceProfile(str, Enum):
    """Available voice profiles (Groq PlayAI voices)"""
    ALIYAH = "Aaliyah-PlayAI"
    ARIA = "Aria-PlayAI"
    DEXTER = "Dexter-PlayAI"
    FIONA = "Fiona-PlayAI"

class STTInput(BaseModel):
    """Input for speech-to-text (audio bytes are passed separately)"""
    audio_format: AudioFormat = Field(AudioFormat.WAV, description="Audio format")
    language: str = Field("en", description="Language code (en, fr, etc.)")
    enable_timestamps: bool = Field(False, description="Include word timestamps")

    class Config:
        json_schema_extra = {
            "example": {
                "audio_format": "wav",
                "language": "en",
                "enable_timestamps": False
            }
        }

class TTSInput(BaseModel):
    """Input for text-to-speech"""
    text: str = Field(..., description="Text to convert to speech")
    voice: VoiceProfile = Field(VoiceProfile.ALIYAH, description="Voice to use")
    format: AudioFormat = Field(AudioFormat.WAV, description="Output format")
    speed: float = Field(1.0, ge=0.5, le=2.0, description="Speech speed")
    pitch: float = Field(1.0, ge=0.5, le=2.0, description="Voice pitch")

    class Config:
        json_schema_extra = {
            "example": {
                "text": "Hello, this is a test of text-to-speech.",
                "voice": "Aaliyah-PlayAI",
                "format": "wav",
                "speed": 1.0,
                "pitch": 1.0
            }
        }
201
+
202
# ==================== RESPONSE TYPES ====================

class PredictionResult(BaseModel):
    """Base prediction result"""
    prediction: Union[int, str]
    confidence: float = Field(..., ge=0.0, le=1.0)
    processing_time: Optional[float] = Field(None, description="Processing time in seconds")

class StancePredictionResult(PredictionResult):
    """Stance prediction result"""
    predicted_stance: StanceType
    probability_pro: float = Field(..., ge=0.0, le=1.0)
    probability_con: float = Field(..., ge=0.0, le=1.0)
    topic: str
    argument: str

class KPAPredictionResult(PredictionResult):
    """KPA prediction result"""
    label: KpaLabel
    probabilities: Dict[KpaLabel, float]
    argument: str
    key_point: str

class GenerationResult(BaseModel):
    """Base generation result"""
    generated_text: str
    prompt: str
    # Fix: in pydantic v2 an Optional[...] annotation WITHOUT a default is a
    # required (nullable) field; these were clearly meant to be optional, so
    # give them an explicit None default.
    context: Optional[str] = None
    parameters: Dict[str, Any]
    generation_time: Optional[float] = None

class ArgumentGenerationResult(GenerationResult):
    """Argument generation result"""
    stance: StanceType
    length: str
    style: str
    coherence_score: Optional[float] = Field(None, ge=0.0, le=1.0)

class BatchResult(BaseModel):
    """Batch processing result"""
    results: List[Union[StancePredictionResult, KPAPredictionResult, ArgumentGenerationResult]]
    total_processed: int
    successful: int
    failed: int
    average_confidence: Optional[float] = None  # same Optional-default fix
    total_time: float

class ErrorResponse(BaseModel):
    """Error response"""
    error: str
    code: Optional[str] = None      # same Optional-default fix
    details: Optional[Dict[str, Any]] = None
    timestamp: datetime = Field(default_factory=datetime.now)

class HealthResponse(BaseModel):
    """Health check response"""
    status: ServiceStatus
    version: str
    uptime: float
    models: Dict[str, bool]
    services: Dict[str, bool]
    timestamp: datetime = Field(default_factory=datetime.now)
264
+
265
# ==================== TOOL EXECUTION TYPES ====================

class ToolExecutionContext(BaseModel):
    """Context for tool execution"""
    tool_id: str
    # Fix: pydantic v2 treats Optional[...] without a default as REQUIRED;
    # these context fields are clearly meant to be omittable.
    user_id: Optional[str] = None
    session_id: Optional[str] = None
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]] = None

class ToolExecutionResult(BaseModel):
    """Result of tool execution"""
    success: bool
    output: Optional[Dict[str, Any]] = None   # same Optional-default fix
    error: Optional[str] = None
    execution_time: float
    context: ToolExecutionContext

# ==================== CONVERSATION TYPES ====================

class MessageRole(str, Enum):
    """Roles in conversation"""
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"

class ConversationMessage(BaseModel):
    """Single message in conversation"""
    role: MessageRole
    content: str
    timestamp: datetime = Field(default_factory=datetime.now)
    metadata: Optional[Dict[str, Any]] = None  # same Optional-default fix

class ConversationState(BaseModel):
    """Conversation state"""
    conversation_id: str
    messages: List[ConversationMessage]
    created_at: datetime
    updated_at: datetime = Field(default_factory=datetime.now)
    metadata: Dict[str, Any] = Field(default_factory=dict)
305
+
306
# ==================== EXPORT ====================
# Explicit public API of this module; keep in sync with the definitions above.

__all__ = [
    # Enums
    "ModelType",
    "StanceType",
    "KpaLabel",
    "ServiceStatus",
    "ToolCategory",
    "AudioFormat",
    "VoiceProfile",
    "MessageRole",

    # Input Types
    "PredictionInput",
    "StancePredictionInput",
    "KPAPredictionInput",
    "BatchPredictionInput",
    "ArgumentGenerationInput",
    "CounterArgumentInput",
    "STTInput",
    "TTSInput",

    # Result Types
    "PredictionResult",
    "StancePredictionResult",
    "KPAPredictionResult",
    "GenerationResult",
    "ArgumentGenerationResult",
    "BatchResult",

    # Response Types
    "ErrorResponse",
    "HealthResponse",

    # Tool Types
    "ToolExecutionContext",
    "ToolExecutionResult",

    # Conversation Types
    "ConversationMessage",
    "ConversationState",

    # TypedDicts (for compatibility)
    "ResourceMetadata",
    "ToolMetadata",
    "ModelMetadata"
]
models/mcp_models.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field
3
+
4
class ToolCallRequest(BaseModel):
    """Request payload for invoking a single MCP tool."""
    # Name of the registered MCP tool to invoke.
    tool_name: str
    # Tool arguments keyed by parameter name. Use default_factory instead of
    # a `{}` literal: pydantic copies mutable defaults, but default_factory
    # is the idiomatic, unambiguous form.
    arguments: Dict[str, Any] = Field(default_factory=dict)
8
+
9
class ToolCallResponse(BaseModel):
    """Outcome of one MCP tool call, reported in-band."""
    # True when the tool ran to completion; False when `error` is set.
    success: bool
    # Tool output on success; None on failure.
    result: Optional[Dict[str, Any]] = None
    # Human-readable error message on failure; None on success.
    error: Optional[str] = None
    # Echo of the tool that was called, for client-side correlation.
    tool_name: str
15
+
16
class ResourceInfo(BaseModel):
    """Descriptor of a single MCP resource."""
    # URI identifying the resource.
    uri: str
    # Human-readable display name.
    name: str
    # Optional longer description; None when the server supplies none.
    description: Optional[str] = None
    # MIME type of the resource content; required (no default).
    mime_type: str
22
+
23
class ToolInfo(BaseModel):
    """Descriptor of a single MCP tool."""
    # Unique tool name, used as the key when calling the tool.
    name: str
    # Human-readable description of what the tool does.
    description: str
    # Schema describing the tool's arguments — presumably JSON Schema,
    # as produced by the MCP SDK; confirm against the server implementation.
    input_schema: Dict[str, Any]
28
+
29
class ResourceListResponse(BaseModel):
    """Response body for listing resources."""
    # All resources exposed by the MCP server.
    resources: List[ResourceInfo]
    # Number of entries in `resources` (redundant convenience field).
    count: int
33
+
34
class ToolListResponse(BaseModel):
    """Response body for listing tools."""
    # All tools exposed by the MCP server.
    tools: List[ToolInfo]
    # Number of entries in `tools` (redundant convenience field).
    count: int
requirements.txt CHANGED
@@ -1,20 +1,23 @@
1
- fastapi==0.104.1
2
- uvicorn[standard]==0.24.0
3
- python-multipart==0.0.6
4
- python-dotenv==1.0.0
5
- pydantic==2.5.0
6
 
7
  # API Clients
8
- requests==2.31.0
9
- groq==0.9.0
10
 
11
  # Audio processing (optionnel si vous avez besoin de traitement local)
12
- soundfile==0.12.1
13
 
14
  # Hugging Face
15
- transformers==4.35.0
16
- torch==2.0.1+cpu
17
- --extra-index-url https://download.pytorch.org/whl/cpu
18
 
19
  # Autres dépendances
20
- numpy==1.26.4
 
 
 
 
 
1
+ fastapi>=0.115.0
2
+ uvicorn[standard]>=0.29.0
3
+ python-multipart>=0.0.6
4
+ python-dotenv>=1.0.0
5
+ pydantic>=2.5.0
6
 
7
  # API Clients
8
+ requests>=2.31.0
9
+ groq>=0.9.0
10
 
11
  # Audio processing (optionnel si vous avez besoin de traitement local)
12
+ soundfile>=0.12.1
13
 
14
  # Hugging Face
15
transformers>=4.35.0
# NOTE: the previous pin (torch==2.0.1+cpu with a CPU extra-index-url) was
# dropped; this unpinned requirement pulls the default (CUDA) torch wheel,
# which is far larger. Re-add the CPU index if deployment targets have no GPU.
torch>=2.0.1
 
17
 
18
  # Autres dépendances
19
+ numpy>=1.26.4
20
+
21
mcp>=1.0.0
# NOTE: there is no official "fastapi-mcp" package assumed here; the bridge
# lives in services/mcp_service.py. That module imports `MCPServer` from
# `mcp.server`, which the official MCP Python SDK does not export (it ships
# `mcp.server.Server` / `FastMCP`) — verify the import against the installed
# SDK version.
routes/mcp_routes.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, HTTPException, Depends
2
+ from typing import List, Dict, Any
3
+ import logging
4
+ from models.mcp_models import (
5
+ ToolCallRequest,
6
+ ToolCallResponse,
7
+ ResourceListResponse,
8
+ ToolListResponse
9
+ )
10
+ from services.mcp_service import get_mcp_server
11
+
12
+ router = APIRouter(prefix="/mcp", tags=["MCP"])
13
+ logger = logging.getLogger(__name__)
14
+
15
+ @router.get("/health")
16
+ async def mcp_health():
17
+ """Health check for MCP server"""
18
+ return {
19
+ "status": "healthy",
20
+ "service": "Model Context Protocol",
21
+ "version": "1.0.0"
22
+ }
23
+
24
+ @router.get("/resources", response_model=ResourceListResponse)
25
+ async def list_resources():
26
+ """List all MCP resources"""
27
+ try:
28
+ server = get_mcp_server()
29
+ resources = await server.list_resources()
30
+ return ResourceListResponse(
31
+ resources=resources,
32
+ count=len(resources)
33
+ )
34
+ except Exception as e:
35
+ logger.error(f"Error listing resources: {str(e)}")
36
+ raise HTTPException(status_code=500, detail=str(e))
37
+
38
+ @router.get("/tools", response_model=ToolListResponse)
39
+ async def list_tools():
40
+ """List all MCP tools"""
41
+ try:
42
+ server = get_mcp_server()
43
+ tools = await server.list_tools()
44
+ return ToolListResponse(
45
+ tools=tools,
46
+ count=len(tools)
47
+ )
48
+ except Exception as e:
49
+ logger.error(f"Error listing tools: {str(e)}")
50
+ raise HTTPException(status_code=500, detail=str(e))
51
+
52
+ @router.post("/tools/call", response_model=ToolCallResponse)
53
+ async def call_tool(request: ToolCallRequest):
54
+ """Call an MCP tool"""
55
+ try:
56
+ server = get_mcp_server()
57
+ result = await server.call_tool(
58
+ tool_name=request.tool_name,
59
+ arguments=request.arguments
60
+ )
61
+
62
+ return ToolCallResponse(
63
+ success=True,
64
+ result=result,
65
+ tool_name=request.tool_name
66
+ )
67
+ except HTTPException:
68
+ raise
69
+ except Exception as e:
70
+ logger.error(f"Error calling tool {request.tool_name}: {str(e)}")
71
+ return ToolCallResponse(
72
+ success=False,
73
+ error=str(e),
74
+ tool_name=request.tool_name
75
+ )
76
+
77
+ @router.post("/tools/batch")
78
+ async def batch_call_tools(requests: List[ToolCallRequest]):
79
+ """Call multiple MCP tools"""
80
+ results = []
81
+ for request in requests:
82
+ try:
83
+ server = get_mcp_server()
84
+ result = await server.call_tool(
85
+ tool_name=request.tool_name,
86
+ arguments=request.arguments
87
+ )
88
+ results.append({
89
+ "tool_name": request.tool_name,
90
+ "success": True,
91
+ "result": result
92
+ })
93
+ except Exception as e:
94
+ results.append({
95
+ "tool_name": request.tool_name,
96
+ "success": False,
97
+ "error": str(e)
98
+ })
99
+
100
+ return {
101
+ "results": results,
102
+ "total": len(results),
103
+ "successful": sum(1 for r in results if r["success"])
104
+ }
services/mcp_service.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from mcp.server import MCPServer
2
+ from fastapi import FastAPI
3
+ import logging
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
+ _mcp_server = None
8
+
9
def init_mcp_server(app: FastAPI):
    """Create the process-wide MCP server bound to *app* and return it.

    NOTE(review): this relies on `from mcp.server import MCPServer` at the
    top of the file — the official MCP Python SDK does not export that name
    (it provides `mcp.server.Server` / `FastMCP`); confirm which package
    supplies it.
    """
    global _mcp_server
    try:
        _mcp_server = MCPServer(app)
    except Exception as e:
        logger.error(f"✗ Failed to initialize MCP server: {str(e)}")
        raise
    logger.info("✓ MCP Server initialized successfully")
    return _mcp_server
19
+
20
def get_mcp_server() -> MCPServer:
    """Return the process-wide MCP server instance.

    Raises:
        RuntimeError: if init_mcp_server() has not been called yet; the
            message tells the caller how to fix the startup order.
    """
    if _mcp_server is None:
        # More actionable than the bare "not initialized" message.
        raise RuntimeError(
            "MCP server not initialized - call init_mcp_server(app) during "
            "application startup before using MCP routes"
        )
    return _mcp_server