# services/chatbot_service.py (CONFIRMED WORKING VERSION)
import logging
import random

from transformers import pipeline

logger = logging.getLogger(__name__)

# Global chatbot pipeline
chatbot_pipeline = None

def load_chatbot_model():
    """Load the free chatbot model"""
    global chatbot_pipeline
    try:
        logger.info("Loading DialoGPT chatbot model...")
        
        chatbot_pipeline = pipeline(
            "text-generation",
            model="microsoft/DialoGPT-small",
            device="cpu"
        )
        logger.info("βœ“ Chatbot model loaded successfully")
    except Exception as e:
        logger.error(f"βœ— Failed to load chatbot model: {str(e)}")
        chatbot_pipeline = None

async def get_chatbot_response(user_text: str, user_id: str = "default") -> str:
    """
    Generate chatbot response using free model.
    """
    global chatbot_pipeline
    
    try:
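        # Lazy-load the model on first call so a failed load at startup
        # does not take the whole service down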
        if chatbot_pipeline is None:
            load_chatbot_model()
            if chatbot_pipeline is None:
                return get_fallback_response(user_text)
        
        logger.info(f"Generating chatbot response for: '{user_text}'")
        
        # Prepare prompt
        prompt = f"User: {user_text}\nAssistant:"
        
        # Generate a response; sampling (do_sample/temperature/top_p) keeps replies varied
        response = chatbot_pipeline(
            prompt,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            pad_token_id=chatbot_pipeline.tokenizer.eos_token_id
        )
        
        # Extract the response
        generated_text = response[0]['generated_text']
        
        # Keep only the assistant's reply (the pipeline echoes the prompt)
        if "Assistant:" in generated_text:
            bot_response = generated_text.split("Assistant:", 1)[1].strip()
        else:
            bot_response = generated_text.replace(prompt, "").strip()
        
        # If the model keeps role-playing the dialogue, cut it off at the next turn
        bot_response = bot_response.split("User:")[0].strip()
        
        # Fall back if generation produced nothing usable
        if not bot_response:
            bot_response = get_fallback_response(user_text)
        
        logger.info(f"βœ“ Response generated: '{bot_response}'")
        return bot_response
        
    except Exception as e:
        logger.error(f"βœ— Chatbot response failed: {str(e)}")
        return get_fallback_response(user_text)

def get_fallback_response(user_text: str) -> str:
    """Provide fallback responses"""
    fallback_responses = [
        f"I understand you said: '{user_text}'. How can I help you with that?",
        f"That's interesting! Regarding '{user_text}', what would you like to know?",
        f"Thanks for your message about '{user_text}'. How can I assist you further?"
    ]
    
    return random.choice(fallback_responses)
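
# --- Usage sketch (illustrative, not part of the confirmed service code) ---
# A minimal smoke test, assuming this module can be run directly rather
# than through the web app; the sample prompt is a made-up placeholder.
if __name__ == "__main__":
    import asyncio

    logging.basicConfig(level=logging.INFO)
    reply = asyncio.run(get_chatbot_response("Hello, how are you?"))
    print(f"Bot: {reply}")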