malek-messaoudii committed
Commit 1ce17ff · 1 parent: e97ac87

feat: Add extract_topic and voice_chat tools to MCP service, enhancing functionality for topic extraction and chatbot responses. Update models and routes to support new features.

Files changed:
- models/mcp_models.py    +35 -1
- routes/mcp_routes.py    +131 -1
- services/mcp_service.py  +29 -1
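
The commit registers the two new tools on the FastMCP server and exposes them both through the generic tool-call route and through dedicated REST routes. As a quick orientation, here is a minimal sketch of invoking them in-process via `mcp_server.call_tool(...)`, the same call the `/tools/call` route makes internally; the `asyncio` wrapper is only illustrative:

```python
import asyncio

from services.mcp_service import mcp_server  # FastMCP instance defined in this repo

async def demo() -> None:
    # Same call path the generic tool-call route uses: await mcp_server.call_tool(name, arguments)
    topic_result = await mcp_server.call_tool(
        "extract_topic",
        {"text": "Governments should subsidize electric cars to encourage adoption."},
    )
    chat_result = await mcp_server.call_tool(
        "voice_chat",
        {"user_input": "What is climate change?"},  # conversation_id is optional
    )
    print(topic_result)
    print(chat_result)

if __name__ == "__main__":
    asyncio.run(demo())
```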
models/mcp_models.py  CHANGED

@@ -15,7 +15,7 @@ class ToolCallRequest(BaseModel):
         }
     )
 
-    tool_name: str = Field(..., description="Name of the MCP tool to call (e.g., 'detect_stance', 'match_keypoint_argument', 'transcribe_audio', 'generate_speech', 'generate_argument')")
+    tool_name: str = Field(..., description="Name of the MCP tool to call (e.g., 'detect_stance', 'match_keypoint_argument', 'transcribe_audio', 'generate_speech', 'generate_argument', 'extract_topic', 'voice_chat')")
     arguments: Dict[str, Any] = Field(default_factory=dict, description="Arguments for the tool (varies by tool)")
 
 class ToolCallResponse(BaseModel):
@@ -105,6 +105,40 @@ class GenerateSpeechResponse(BaseModel):
 
     audio_path: str = Field(..., description="Path to generated audio file")
 
+class ExtractTopicResponse(BaseModel):
+    """Response model for topic extraction"""
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "text": "Governments should subsidize electric cars to encourage adoption.",
+                "topic": "government subsidies for electric vehicle adoption",
+                "timestamp": "2024-01-01T12:00:00"
+            }
+        }
+    )
+
+    text: str = Field(..., description="The input text")
+    topic: str = Field(..., description="The extracted topic")
+    timestamp: Optional[str] = Field(None, description="Timestamp of extraction")
+
+class VoiceChatResponse(BaseModel):
+    """Response model for voice chat"""
+    model_config = ConfigDict(
+        json_schema_extra={
+            "example": {
+                "user_input": "What is climate change?",
+                "conversation_id": "uuid-here",
+                "response": "Climate change refers to long-term changes in global temperatures and weather patterns.",
+                "timestamp": "2024-01-01T12:00:00"
+            }
+        }
+    )
+
+    user_input: str = Field(..., description="The user's input text")
+    conversation_id: Optional[str] = Field(None, description="The conversation ID")
+    response: str = Field(..., description="The chatbot's response")
+    timestamp: Optional[str] = Field(None, description="Timestamp of response")
+
 class ResourceInfo(BaseModel):
     """Information about an MCP resource"""
     uri: str
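
The added response models are plain Pydantic models; here is a minimal sketch of instantiating them directly, useful for checking that the example payloads above validate. Pydantic v2 is assumed, consistent with the `ConfigDict` usage in the diff:

```python
from models.mcp_models import ExtractTopicResponse, VoiceChatResponse

topic_resp = ExtractTopicResponse(
    text="Governments should subsidize electric cars to encourage adoption.",
    topic="government subsidies for electric vehicle adoption",
    timestamp="2024-01-01T12:00:00",
)
print(topic_resp.model_dump_json(indent=2))

# conversation_id and timestamp are Optional and default to None when omitted.
chat_resp = VoiceChatResponse(
    user_input="What is climate change?",
    response="Climate change refers to long-term changes in global temperatures and weather patterns.",
)
print(chat_resp.model_dump_json(indent=2))
```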
routes/mcp_routes.py  CHANGED

@@ -14,6 +14,8 @@ from services.mcp_service import mcp_server
 from services.stance_model_manager import stance_model_manager
 from services.label_model_manager import kpa_model_manager
 from services.generate_model_manager import generate_model_manager
+from services.topic_service import topic_service
+from services.chat_service import generate_chat_response
 from models.mcp_models import (
     ToolListResponse,
     ToolInfo,
@@ -22,7 +24,9 @@ from models.mcp_models import (
     DetectStanceResponse,
     MatchKeypointResponse,
     TranscribeAudioResponse,
-    GenerateSpeechResponse
+    GenerateSpeechResponse,
+    ExtractTopicResponse,
+    VoiceChatResponse
 )
 from models.generate import GenerateRequest, GenerateResponse
 from datetime import datetime
@@ -75,6 +79,30 @@ class GenerateSpeechRequest(BaseModel):
         }
     }
 
+class ExtractTopicRequest(BaseModel):
+    """Request pour extraire un topic d'un texte"""
+    text: str = Field(..., min_length=5, max_length=5000, description="Le texte/argument à partir duquel extraire le topic")
+
+    class Config:
+        json_schema_extra = {
+            "example": {
+                "text": "Governments should subsidize electric cars to encourage adoption."
+            }
+        }
+
+class VoiceChatRequest(BaseModel):
+    """Request pour générer une réponse de chatbot vocal"""
+    user_input: str = Field(..., description="L'entrée utilisateur (en anglais)")
+    conversation_id: Optional[str] = Field(None, description="ID de conversation pour maintenir le contexte")
+
+    class Config:
+        json_schema_extra = {
+            "example": {
+                "user_input": "What is climate change?",
+                "conversation_id": "optional-conversation-id"
+            }
+        }
+
 
 
 # ===== Routes MCP =====
@@ -90,6 +118,8 @@ async def mcp_health():
         "transcribe_audio",
         "generate_speech",
         "generate_argument",
+        "extract_topic",
+        "voice_chat",
         "health_check"
     ]
     return {
@@ -167,6 +197,29 @@ async def list_mcp_tools():
                 "required": ["topic", "position"]
             }
         ),
+        ToolInfo(
+            name="extract_topic",
+            description="Extrait un topic à partir d'un texte/argument donné",
+            input_schema={
+                "type": "object",
+                "properties": {
+                    "text": {"type": "string", "description": "Le texte/argument à partir duquel extraire le topic"}
+                },
+                "required": ["text"]
+            }
+        ),
+        ToolInfo(
+            name="voice_chat",
+            description="Génère une réponse de chatbot vocal en anglais",
+            input_schema={
+                "type": "object",
+                "properties": {
+                    "user_input": {"type": "string", "description": "L'entrée utilisateur (en anglais)"},
+                    "conversation_id": {"type": "string", "description": "ID de conversation pour maintenir le contexte (optionnel)"}
+                },
+                "required": ["user_input"]
+            }
+        ),
         ToolInfo(
             name="health_check",
             description="Health check pour le serveur MCP",
@@ -244,6 +297,27 @@ async def call_mcp_tool(request: ToolCallRequest):
        }
    }
    ```
+
+    6. **extract_topic** - Extraire un topic d'un texte:
+    ```json
+    {
+        "tool_name": "extract_topic",
+        "arguments": {
+            "text": "Governments should subsidize electric cars to encourage adoption."
+        }
+    }
+    ```
+
+    7. **voice_chat** - Générer une réponse de chatbot vocal:
+    ```json
+    {
+        "tool_name": "voice_chat",
+        "arguments": {
+            "user_input": "What is climate change?",
+            "conversation_id": "optional-conversation-id"
+        }
+    }
+    ```
     """
     try:
         result = await mcp_server.call_tool(request.tool_name, request.arguments)
@@ -510,6 +584,62 @@ async def mcp_generate_argument(request: GenerateRequest):
         logger.error(f"Error in generate_argument: {e}", exc_info=True)
         raise HTTPException(status_code=500, detail=f"Error executing tool generate_argument: {e}")
 
+@router.post("/tools/extract-topic", response_model=ExtractTopicResponse, summary="Extraire un topic d'un texte")
+async def mcp_extract_topic(request: ExtractTopicRequest):
+    """Extrait un topic à partir d'un texte/argument donné"""
+    try:
+        # Vérifier que le service est initialisé
+        if not topic_service.initialized:
+            topic_service.initialize()
+
+        # Appeler directement le service (plus fiable que via MCP)
+        topic_text = topic_service.extract_topic(request.text)
+
+        # Construire la réponse structurée
+        response = ExtractTopicResponse(
+            text=request.text,
+            topic=topic_text,
+            timestamp=datetime.now().isoformat()
+        )
+
+        logger.info(f"Topic extracted from text '{request.text[:50]}...': {topic_text[:50]}...")
+        return response
+
+    except ValueError as e:
+        logger.error(f"Validation error in extract_topic: {str(e)}")
+        raise HTTPException(status_code=400, detail=str(e))
+    except Exception as e:
+        logger.error(f"Error in extract_topic: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"Error executing tool extract_topic: {e}")
+
+@router.post("/tools/voice-chat", response_model=VoiceChatResponse, summary="Générer une réponse de chatbot vocal")
+async def mcp_voice_chat(request: VoiceChatRequest):
+    """Génère une réponse de chatbot vocal en anglais"""
+    try:
+        # Appeler directement le service (plus fiable que via MCP)
+        response_text = generate_chat_response(
+            user_input=request.user_input,
+            conversation_id=request.conversation_id
+        )
+
+        # Construire la réponse structurée
+        response = VoiceChatResponse(
+            user_input=request.user_input,
+            conversation_id=request.conversation_id,
+            response=response_text,
+            timestamp=datetime.now().isoformat()
+        )
+
+        logger.info(f"Voice chat response generated for input '{request.user_input[:50]}...': {response_text[:50]}...")
+        return response
+
+    except ValueError as e:
+        logger.error(f"Validation error in voice_chat: {str(e)}")
+        raise HTTPException(status_code=400, detail=str(e))
+    except Exception as e:
+        logger.error(f"Error in voice_chat: {e}", exc_info=True)
+        raise HTTPException(status_code=500, detail=f"Error executing tool voice_chat: {e}")
+
 @router.get("/tools/health-check", summary="Health check MCP (outil)")
 async def mcp_tool_health_check() -> Dict[str, Any]:
     """Health check via l'outil MCP"""
services/mcp_service.py  CHANGED

@@ -1,7 +1,7 @@
 """Service pour initialiser le serveur MCP avec FastMCP"""
 
 from mcp.server.fastmcp import FastMCP
-from typing import Dict, Any
+from typing import Dict, Any, Optional
 import logging
 
 from fastapi import FastAPI
@@ -11,6 +11,8 @@ from services.label_model_manager import kpa_model_manager
 from services.stt_service import speech_to_text
 from services.tts_service import text_to_speech
 from services.generate_model_manager import generate_model_manager
+from services.topic_service import topic_service
+from services.chat_service import generate_chat_response
 
 logger = logging.getLogger(__name__)
 
@@ -62,6 +64,30 @@ def generate_argument(topic: str, position: str) -> Dict[str, Any]:
         "argument": argument
     }
 
+@mcp_server.tool()
+def extract_topic(text: str) -> Dict[str, Any]:
+    """Extract a topic from the given text/argument"""
+    if not topic_service.initialized:
+        topic_service.initialize()
+    topic = topic_service.extract_topic(text)
+    return {
+        "text": text,
+        "topic": topic
+    }
+
+@mcp_server.tool()
+def voice_chat(user_input: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
+    """Generate a chatbot response for voice chat (English only)"""
+    response_text = generate_chat_response(
+        user_input=user_input,
+        conversation_id=conversation_id
+    )
+    return {
+        "user_input": user_input,
+        "conversation_id": conversation_id,
+        "response": response_text
+    }
+
 @mcp_server.resource("debate://prompt")
 def get_debate_prompt() -> str:
     return "Tu es un expert en débat. Génère 3 arguments PRO pour le topic donné. Sois concis et persuasif."
@@ -78,6 +104,8 @@ def health_check() -> Dict[str, Any]:
             "transcribe_audio",
             "generate_speech",
             "generate_argument",
+            "extract_topic",
+            "voice_chat",
             "health_check"
         ]
     except Exception: