malek-messaoudii
feat: Add extract_topic and voice_chat tools to MCP service, enhancing functionality for topic extraction and chatbot responses. Update models and routes to support new features.
1ce17ff
from pydantic import BaseModel, Field, ConfigDict
from typing import Any, Dict, List, Optional


class ToolCallRequest(BaseModel):
    """Request for calling an MCP tool"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "tool_name": "detect_stance",
                "arguments": {
                    "topic": "Climate change is real",
                    "argument": "Rising global temperatures prove it"
                }
            }
        }
    )

    tool_name: str = Field(..., description="Name of the MCP tool to call (e.g., 'detect_stance', 'match_keypoint_argument', 'transcribe_audio', 'generate_speech', 'generate_argument', 'extract_topic', 'voice_chat')")
    arguments: Dict[str, Any] = Field(default_factory=dict, description="Arguments for the tool (varies by tool)")


class ToolCallResponse(BaseModel):
    """Response from MCP tool call"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "success": True,
                "result": {
                    "predicted_stance": "PRO",
                    "confidence": 0.9598,
                    "probability_con": 0.0402,
                    "probability_pro": 0.9598
                },
                "error": None,
                "tool_name": "detect_stance"
            }
        }
    )

    success: bool = Field(..., description="Whether the tool call was successful")
    result: Optional[Dict[str, Any]] = Field(None, description="Result from the tool call")
    error: Optional[str] = Field(None, description="Error message if the call failed")
    tool_name: str = Field(..., description="Name of the tool that was called")
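
# Illustrative sketch (not part of this commit): one way a FastAPI route could
# accept ToolCallRequest and wrap either a tool result or a failure in
# ToolCallResponse. The "/mcp/call-tool" path and the `mcp_client.call_tool`
# helper are assumptions made for this example, not the service's actual
# route or client API.
#
# from fastapi import APIRouter
#
# router = APIRouter()
#
# @router.post("/mcp/call-tool", response_model=ToolCallResponse)
# async def call_tool(request: ToolCallRequest) -> ToolCallResponse:
#     try:
#         result = await mcp_client.call_tool(request.tool_name, request.arguments)
#         return ToolCallResponse(success=True, result=result, tool_name=request.tool_name)
#     except Exception as exc:
#         return ToolCallResponse(success=False, error=str(exc), tool_name=request.tool_name)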

# Response models for individual MCP tools

class DetectStanceResponse(BaseModel):
    """Response model for stance detection"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "predicted_stance": "PRO",
                "confidence": 0.9598,
                "probability_con": 0.0402,
                "probability_pro": 0.9598
            }
        }
    )

    predicted_stance: str = Field(..., description="PRO or CON")
    confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence score")
    probability_con: float = Field(..., ge=0.0, le=1.0)
    probability_pro: float = Field(..., ge=0.0, le=1.0)


class MatchKeypointResponse(BaseModel):
    """Response model for keypoint matching"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "prediction": 1,
                "label": "apparie",
                "confidence": 0.8157,
                "probabilities": {
                    "non_apparie": 0.1843,
                    "apparie": 0.8157
                }
            }
        }
    )

    prediction: int = Field(..., description="1 = apparie, 0 = non_apparie")
    label: str = Field(..., description="apparie or non_apparie")
    confidence: float = Field(..., ge=0.0, le=1.0, description="Confidence score")
    probabilities: Dict[str, float] = Field(..., description="Dictionary of class probabilities")


class TranscribeAudioResponse(BaseModel):
    """Response model for audio transcription"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "text": "Hello, this is the transcribed text from the audio file."
            }
        }
    )

    text: str = Field(..., description="Transcribed text from audio")


class GenerateSpeechResponse(BaseModel):
    """Response model for speech generation"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "audio_path": "temp_audio/tts_e9b78164.wav"
            }
        }
    )

    audio_path: str = Field(..., description="Path to generated audio file")


class ExtractTopicResponse(BaseModel):
    """Response model for topic extraction"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "text": "Governments should subsidize electric cars to encourage adoption.",
                "topic": "government subsidies for electric vehicle adoption",
                "timestamp": "2024-01-01T12:00:00"
            }
        }
    )

    text: str = Field(..., description="The input text")
    topic: str = Field(..., description="The extracted topic")
    timestamp: Optional[str] = Field(None, description="Timestamp of extraction")


class VoiceChatResponse(BaseModel):
    """Response model for voice chat"""

    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "user_input": "What is climate change?",
                "conversation_id": "uuid-here",
                "response": "Climate change refers to long-term changes in global temperatures and weather patterns.",
                "timestamp": "2024-01-01T12:00:00"
            }
        }
    )

    user_input: str = Field(..., description="The user's input text")
    conversation_id: Optional[str] = Field(None, description="The conversation ID")
    response: str = Field(..., description="The chatbot's response")
    timestamp: Optional[str] = Field(None, description="Timestamp of response")


class ResourceInfo(BaseModel):
    """Information about an MCP resource"""

    uri: str
    name: str
    description: Optional[str] = None
    mime_type: str


class ToolInfo(BaseModel):
    """Information about an MCP tool"""

    name: str
    description: str
    input_schema: Dict[str, Any]


class ResourceListResponse(BaseModel):
    """Response for listing resources"""

    resources: List[ResourceInfo]
    count: int


class ToolListResponse(BaseModel):
    """Response for listing tools"""

    tools: List[ToolInfo]
    count: int
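

# Minimal usage sketch (added for illustration, not part of the commit): shows
# how result dicts for the new extract_topic and voice_chat tools could be
# validated into their response models. The payload keys mirror the
# json_schema_extra examples above; the values are illustrative only.
if __name__ == "__main__":
    topic_payload = {
        "text": "Governments should subsidize electric cars to encourage adoption.",
        "topic": "government subsidies for electric vehicle adoption",
        "timestamp": "2024-01-01T12:00:00",
    }
    topic = ExtractTopicResponse.model_validate(topic_payload)
    print(topic.topic)

    # conversation_id and timestamp are optional and default to None.
    chat_payload = {
        "user_input": "What is climate change?",
        "response": "Climate change refers to long-term changes in global temperatures and weather patterns.",
    }
    chat = VoiceChatResponse.model_validate(chat_payload)
    print(chat.response)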