import logging
import random

from transformers import pipeline

logger = logging.getLogger(__name__)

# Global chatbot components
chatbot_pipeline = None
chat_history = {}
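# NOTE: chat_history lives only in this process's memory; it is lost on
# restart and is not shared between worker processes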

def load_chatbot_model():
    """Load the free DialoGPT model for chatbot"""
    global chatbot_pipeline
    try:
        logger.info("Loading DialoGPT chatbot model...")
        
        # Use DialoGPT medium for better responses
        chatbot_pipeline = pipeline(
            "text-generation",
            model="microsoft/DialoGPT-medium",
            tokenizer="microsoft/DialoGPT-medium",
            device="cpu"
        )
        logger.info("βœ“ DialoGPT chatbot model loaded successfully")
    except Exception as e:
        logger.error(f"βœ— Failed to load DialoGPT model: {str(e)}")
        chatbot_pipeline = None

async def get_chatbot_response(user_text: str, user_id: str = "default") -> str:
    """
    Generate chatbot response using free DialoGPT model.
    
    Args:
        user_text: User input text
        user_id: Unique user ID for maintaining conversation history
    
    Returns:
        Chatbot response text
    """
    global chatbot_pipeline
    
    try:
        if chatbot_pipeline is None:
            load_chatbot_model()
            if chatbot_pipeline is None:
                return get_fallback_response(user_text)
        
        logger.info(f"Generating chatbot response for: '{user_text}'")
        
        # Get or initialize chat history for this user
        if user_id not in chat_history:
            chat_history[user_id] = []
        
        # Build a lightweight context window from the most recent utterances.
        # (DialoGPT is normally fed turns separated by its EOS token; joining
        # with spaces is a simplification.)
        conversation = chat_history[user_id] + [user_text]
        context = " ".join(conversation[-3:])  # last 3 utterances, not 3 full exchanges
        
        # Generate a reply; max_new_tokens bounds only the new text, so a
        # growing context cannot exhaust the overall length budget
        response = chatbot_pipeline(
            context,
            max_new_tokens=60,
            num_return_sequences=1,
            pad_token_id=chatbot_pipeline.tokenizer.eos_token_id,
            no_repeat_ngram_size=3,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.7
        )
        
        bot_response = response[0]['generated_text'].strip()
        
        # Extract only the new text: the pipeline returns prompt + continuation,
        # so strip the prompt prefix (replace() could also delete repeated
        # substrings elsewhere in the output)
        if bot_response.startswith(context):
            bot_response = bot_response[len(context):].strip()
        
        # Clean up the response; fall back if the model produced nothing usable
        bot_response = clean_response(bot_response)
        if not bot_response:
            return get_fallback_response(user_text)
        
        # Update chat history
        chat_history[user_id].extend([user_text, bot_response])
        
        # Keep only recent history (8 entries = last 4 user/bot exchanges)
        if len(chat_history[user_id]) > 8:
            chat_history[user_id] = chat_history[user_id][-8:]
        
        logger.info(f"βœ“ Response generated: '{bot_response}'")
        return bot_response
        
    except Exception as e:
        logger.error(f"βœ— Chatbot response failed: {str(e)}")
        return get_fallback_response(user_text)


def clean_response(response: str) -> str:
    """Clean and format the chatbot response"""
    # Remove extra spaces
    response = ' '.join(response.split())
    
    # Ensure proper sentence ending
    if response and not response.endswith(('.', '!', '?')):
        response += '.'
    
    # Limit response length
    if len(response) > 200:
        response = response[:197] + '...'
    
    return response


def get_fallback_response(user_text: str) -> str:
    """Provide fallback responses when model fails"""
    fallback_responses = [
        f"I understand you said: '{user_text}'. Could you tell me more about that?",
        f"That's interesting! You mentioned: '{user_text}'. What would you like to know?",
        f"Thanks for sharing! Regarding '{user_text}', how can I help you?",
        f"I heard you say: '{user_text}'. Could you elaborate on that?"
    ]
    
    return random.choice(fallback_responses)
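

# Illustrative usage sketch (an assumption, not part of the module's API):
# running the file directly performs a quick local smoke test of the
# load-then-respond flow defined above.
if __name__ == "__main__":
    import asyncio

    logging.basicConfig(level=logging.INFO)
    load_chatbot_model()
    reply = asyncio.run(get_chatbot_response("Hello there!", user_id="demo"))
    print(f"Bot: {reply}")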