Commit 4b954a0
Parent(s): 78440ac
Add better logging and direct AI test endpoint
app.py CHANGED
@@ -140,6 +140,7 @@ Minimum purchase is 1 roll (67-70 yards)."""
     def generate_response(self, user_message: str) -> str:
         """Generate response using HuggingFace Inference API"""
         if not self.client:
+            logger.warning("No HuggingFace client available, using mock response")
             return self.get_mock_response(user_message)
 
         try:
@@ -151,6 +152,8 @@ Minimum purchase is 1 roll (67-70 yards)."""
             # For other models, use the full prompt
             prompt = f"<|system|>\n{self.system_prompt}\n<|user|>\n{user_message}\n<|assistant|>\n"
 
+            logger.info(f"Generating response for prompt: {prompt[:100]}...")
+
             # Generate response
             response = self.client.text_generation(
                 prompt,
@@ -162,6 +165,8 @@ Minimum purchase is 1 roll (67-70 yards)."""
                 stop_sequences=["<|end|>", "<|user|>", "User:", "Assistant:"]
             )
 
+            logger.info(f"Raw AI response: {response[:200]}...")
+
             # Clean up the response
             if "Assistant:" in response:
                 assistant_response = response.split("Assistant:")[-1].strip()
@@ -173,8 +178,11 @@ Minimum purchase is 1 roll (67-70 yards)."""
             # Remove any remaining special tokens
             assistant_response = assistant_response.replace("<|end|>", "").replace("<|user|>", "").strip()
 
+            logger.info(f"Cleaned AI response: {assistant_response[:100]}...")
+
             # If response is too short or generic, use mock response
             if len(assistant_response) < 10 or "I don't know" in assistant_response.lower():
+                logger.warning("AI response too short, using mock response")
                 return self.get_mock_response(user_message)
 
             return assistant_response
@@ -474,6 +482,48 @@ async def test_trained_model():
         "note": "This is a simulated response for HF Spaces demo"
     }
 
+@app.post("/api/test/ai")
+async def test_ai_directly(request: ChatRequest):
+    """Test AI directly without fallback to mock responses"""
+    try:
+        if not ai_assistant.client:
+            return {
+                "success": False,
+                "message": "No HuggingFace client available",
+                "response": None
+            }
+
+        # Test with a simple prompt
+        test_prompt = f"User: {request.message}\nAssistant:"
+
+        logger.info(f"Testing AI with prompt: {test_prompt}")
+
+        response = ai_assistant.client.text_generation(
+            test_prompt,
+            max_new_tokens=100,
+            temperature=0.7,
+            top_p=0.9,
+            top_k=40
+        )
+
+        logger.info(f"Direct AI response: {response}")
+
+        return {
+            "success": True,
+            "message": "AI response generated successfully",
+            "raw_response": response,
+            "model": ai_assistant.model,
+            "api_key_available": bool(ai_assistant.api_key)
+        }
+
+    except Exception as e:
+        logger.error(f"Error in direct AI test: {e}")
+        return {
+            "success": False,
+            "message": f"Error: {str(e)}",
+            "response": None
+        }
+
 if __name__ == "__main__":
     # Get port from environment variable (Hugging Face Spaces uses 7860)
     port = int(os.getenv("PORT", 7860))
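
The added log lines call a module-level logger whose definition sits outside this diff. A minimal setup consistent with these calls would look like the sketch below; the log level and format string are assumptions, not taken from app.py.

import logging

# Hypothetical logger setup; app.py's actual configuration is outside this diff.
# Level INFO so the logger.info(...) calls added above are actually emitted.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)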
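
Once deployed, the new /api/test/ai endpoint can be exercised with a small client script. The sketch below assumes the app is reachable locally on the default port 7860 (set at the bottom of app.py) and that ChatRequest exposes a "message" field, as implied by request.message in the handler; the host, URL, and example question are assumptions for illustration.

import requests  # assumes the requests package is installed

# Hypothetical smoke test for the new endpoint; host and port are assumptions.
resp = requests.post(
    "http://localhost:7860/api/test/ai",
    json={"message": "What is the minimum order quantity?"},
    timeout=60,
)
data = resp.json()
if data["success"]:
    print("model:", data["model"])
    print("raw_response:", data["raw_response"])
else:
    # On failure the handler returns "message" and "response": None
    print("AI test failed:", data["message"])

Because the handler deliberately skips the get_mock_response fallback, a "success": false result here isolates problems with the HuggingFace client itself rather than being masked by a canned reply.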