import asyncio
import logging

from services.gemini_client import get_gemini_client

logger = logging.getLogger(__name__)


async def get_chatbot_response(user_text: str) -> str:
    """Generate a chatbot response using the Gemini API.

    Args:
        user_text: User input text.

    Returns:
        Chatbot response text, or a fallback message if generation fails.
    """
    try:
        client = get_gemini_client()
        logger.info(f"Generating chatbot response for: '{user_text}'")

        # System prompt tuned for short, voice-friendly replies.
        system_prompt = """You are a helpful, friendly AI assistant.
Respond concisely and naturally to user queries.
Keep responses brief (1-2 sentences) for voice interaction."""

        # Combine the system prompt with the user input.
        full_prompt = f"{system_prompt}\n\nUser: {user_text}"

        # generate_content is a blocking call, so run it in a worker thread
        # to avoid stalling the event loop.
        response = await asyncio.to_thread(
            client.models.generate_content,
            model="gemini-2.0-flash-exp",
            contents=[full_prompt],
        )
        response_text = response.text
        logger.info(f"Response generated: '{response_text}'")
        return response_text
    except Exception as e:
        logger.error(f"Chatbot response failed: {e}")
        # Fallback response so the caller always gets usable text.
        return f"I understood you said: '{user_text}'. Could you tell me more?"
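

# Minimal usage sketch, assuming services.gemini_client is configured with
# valid Gemini API credentials; the prompt below is just an illustrative input.
if __name__ == "__main__":
    async def _demo() -> None:
        reply = await get_chatbot_response("Hello! What can you do?")
        print(reply)

    asyncio.run(_demo())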