destinyebuka committed on
Commit
a1dbd9c
·
1 Parent(s): e58e225

backward chain

Browse files
app/ai/graph.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/ai/graph.py FINAL: Proper intent → node routing
2
  from langgraph.graph import StateGraph, START, END
3
  from app.ai.state import ChatState
4
  from app.ai.nodes.intent_node import intent_node
@@ -6,100 +6,82 @@ from app.ai.nodes.role_gate_node import role_gate_node
6
  from app.ai.nodes.search_node import search_node
7
  from app.ai.nodes.draft_node import draft_node
8
  from app.ai.nodes.publish_node import publish_node
 
9
  from structlog import get_logger
10
 
11
  logger = get_logger(__name__)
12
 
13
  workflow = StateGraph(ChatState)
14
 
15
- # ---------- unique node names ----------
16
- workflow.add_node("parse_intent", intent_node) # Detect intent + commands
17
- workflow.add_node("check_permissions", role_gate_node) # Role-based gate
18
- workflow.add_node("search_listings", search_node) # Search execution
19
- workflow.add_node("create_draft", draft_node) # Draft generation (only on draft_ready)
20
- workflow.add_node("handle_publish", publish_node) # Publish to MongoDB
 
21
 
22
- # ---------- ALWAYS start at parse_intent ----------
23
  workflow.add_edge(START, "parse_intent")
24
 
25
- # ---------- Route after intent extraction ----------
26
- def route_after_intent(state):
27
- """Route based on intent detected by intent_node"""
28
  intent = state.get("intent")
29
 
30
- logger.info(f"🔀 Routing after intent", intent=intent, status=state.get("status"))
31
-
32
- # PUBLISH command detected
33
- if intent == "publish":
34
- logger.info("➡️ Routing to: handle_publish")
35
- return "handle_publish"
36
-
37
- # SEARCH intent
38
- if intent == "search":
39
- logger.info("➡️ Routing to: check_permissions (search path)")
40
- return "check_permissions"
41
-
42
- # LIST intent
43
- if intent == "list":
44
- logger.info("➡️ Routing to: check_permissions (list path)")
45
  return "check_permissions"
46
-
47
- # No special intent = stay at END (casual chat)
48
- logger.info("➡️ Routing to: END (casual chat)")
49
- return END
50
 
51
  workflow.add_conditional_edges(
52
  "parse_intent",
53
- route_after_intent,
54
  {
55
  "check_permissions": "check_permissions",
56
- "handle_publish": "handle_publish",
57
- END: END
58
  }
59
  )
60
 
61
- # ---------- Route after permission check ----------
62
  def route_from_permissions(state):
63
- """Route after role-based permission check"""
64
- allowed = state.get("allowed", False)
 
 
 
65
  intent = state.get("intent")
66
- status = state.get("status")
67
 
68
- if not allowed:
69
- logger.info("🚫 Request blocked by permission check")
70
- return END
71
-
72
  if intent == "search":
73
- logger.info("➡️ Routing to: search_listings")
74
  return "search_listings"
75
-
76
- if intent == "list":
77
- # If status is draft_ready, generate draft
78
- if status == "draft_ready":
79
- logger.info("➡️ Routing to: create_draft (generate preview)")
80
- return "create_draft"
81
- # Otherwise end (collecting fields or already previewed)
82
- logger.info("➡️ Routing to: END (collecting or preview active)")
83
  return END
84
-
85
- logger.info("➡️ Routing to: END (no matching intent)")
86
- return END
87
 
88
  workflow.add_conditional_edges(
89
  "check_permissions",
90
  route_from_permissions,
91
  {
92
  "search_listings": "search_listings",
93
- "create_draft": "create_draft",
94
- END: END
95
  }
96
  )
97
 
98
- # ---------- Terminal edges ----------
99
  workflow.add_edge("search_listings", END)
100
- workflow.add_edge("create_draft", END)
101
- workflow.add_edge("handle_publish", END)
102
 
103
- # Compile
 
 
 
 
 
 
104
  agent = workflow.compile()
105
- logger.info("✅ LangGraph compiled with command detection")
 
 
1
# app/ai/graph.py - FIXED NODE NAMING CONFLICT
#
# Builds the chat agent graph. Node names are deliberately distinct from
# ChatState keys (e.g. "parse_intent" instead of "intent") because LangGraph
# rejects a node name that collides with a state channel.
from langgraph.graph import StateGraph, START, END
from app.ai.state import ChatState
from app.ai.nodes.intent_node import intent_node
from app.ai.nodes.role_gate_node import role_gate_node
from app.ai.nodes.search_node import search_node
from app.ai.nodes.draft_node import draft_node
from app.ai.nodes.publish_node import publish_node
from app.ai.utils.intent_extractor import extract_intent_from_state  # NOTE(review): unused here — confirm before removing
from structlog import get_logger

logger = get_logger(__name__)

workflow = StateGraph(ChatState)

# ---- nodes (unique names that don't shadow state fields) ----
workflow.add_node("parse_intent", intent_node)          # was "intent"
workflow.add_node("check_permissions", role_gate_node)  # was "role_gate"
workflow.add_node("search_listings", search_node)       # was "search"
workflow.add_node("create_draft", draft_node)           # was "draft"
workflow.add_node("handle_publish", publish_node)       # was "publish"

# Every turn starts with intent parsing.
workflow.add_edge(START, "parse_intent")


def route_from_intent(state):
    """Route from intent parsing to the next node.

    - "publish"         -> publish the previously previewed draft.
      FIX: this route had been dropped, which made the user-facing
      "say **publish** to make it live!" instruction a no-op.
    - "search" / "list" -> role/permission check first.
    - anything else     -> END (casual chat).
    """
    intent = state.get("intent")

    if intent == "publish":
        logger.info("➡️ Routing to: handle_publish")
        return "handle_publish"

    # Only check role for listing and search intents
    if intent in ("search", "list"):
        return "check_permissions"

    return END


workflow.add_conditional_edges(
    "parse_intent",
    route_from_intent,
    {
        "check_permissions": "check_permissions",
        "handle_publish": "handle_publish",
        END: END,
    },
)


def route_from_permissions(state):
    """Route after the role/permission check to the actual operation."""
    if not state.get("allowed", False):
        logger.info("🚫 Request blocked by permission check")
        return END

    intent = state.get("intent")

    if intent == "search":
        logger.info("🔍 Routing to search")
        return "search_listings"
    elif intent == "list":
        logger.info("📝 Routing to draft creation")
        return "create_draft"
    else:
        return END


workflow.add_conditional_edges(
    "check_permissions",
    route_from_permissions,
    {
        "search_listings": "search_listings",
        "create_draft": "create_draft",
        END: END,
    },
)

# ---- terminal edges ----
workflow.add_edge("search_listings", END)

# FIX: create_draft previously chained straight into handle_publish, which
# published the listing in the same turn as the preview. That contradicts
# draft_node, which sets status to "waiting_for_images"/"preview_shown" and
# explicitly asks the user to confirm with "publish". The draft turn now
# ends here; publishing happens via the explicit "publish" intent above.
workflow.add_edge("create_draft", END)
workflow.add_edge("handle_publish", END)

# Compile the graph
agent = workflow.compile()

logger.info("✅ LangGraph compiled successfully with unique node names")
app/ai/nodes/draft_node.py CHANGED
@@ -1,70 +1,56 @@
1
- # app/ai/nodes/draft_node.py FINAL: Error handling + validation + observability
2
- import logging
3
  import datetime
4
  from typing import Dict
5
-
6
  from app.database import get_db
7
- from app.core.error_handling import trace_operation, handle_errors
 
 
 
8
  from app.ml.models.ml_listing_extractor import get_ml_extractor
9
 
10
- logger = logging.getLogger(__name__)
11
 
 
12
  ml_extractor = get_ml_extractor()
13
 
14
- # ============================================================
15
- # Amenity Icons Mapping
16
- # ============================================================
17
-
18
  AMENITY_ICONS = {
19
- "wifi": "📶",
20
- "parking": "🅿️",
21
- "furnished": "🛋️",
22
- "washing machine": "🧼",
23
- "washing": "🧼",
24
- "dryer": "🔥",
25
- "balcony": "🎋️",
26
- "pool": "🏊",
27
- "gym": "💪",
28
- "garden": "🌿",
29
- "air conditioning": "❄️",
30
- "ac": "❄️",
31
- "kitchen": "🍳",
32
- "tv": "📺",
33
- "television": "📺",
34
  }
35
 
36
- # ============================================================
37
- # Custom Exceptions
38
- # ============================================================
39
-
40
- class ValidationError(Exception):
41
- """Raised when field validation fails."""
42
- pass
43
-
44
- class DraftGenerationError(Exception):
45
- """Raised when draft generation fails."""
46
- pass
47
-
48
- # ============================================================
49
- # Helpers
50
- # ============================================================
51
-
52
  def _add_amenity_icons(amenities: list) -> str:
53
  """Convert amenities list to string with icons."""
54
  if not amenities:
55
  return ""
 
56
  icons_text = []
57
  for amenity in amenities:
58
  amenity_lower = amenity.lower().strip()
59
- icon = AMENITY_ICONS.get(amenity_lower, "")
60
  icons_text.append(f"{icon} {amenity.title()}")
 
61
  return " | ".join(icons_text)
62
 
63
  def _generate_title(state: Dict) -> str:
64
  """Generate professional title from listing data."""
65
  bedrooms = state.get("bedrooms", "")
 
66
  location = state.get("location", "").title()
67
  listing_type = state.get("listing_type", "").title()
 
68
  if bedrooms and location:
69
  return f"{bedrooms}-Bedroom {listing_type} in {location}"
70
  return f"Property in {location}"
@@ -73,211 +59,181 @@ def _generate_description(state: Dict) -> str:
73
  """Generate professional description from listing data."""
74
  bedrooms = state.get("bedrooms", "")
75
  bathrooms = state.get("bathrooms", "")
 
76
  location = state.get("location", "").title()
77
  amenities = state.get("amenities", [])
78
  price = state.get("price", "")
79
  price_type = state.get("price_type", "").title()
80
  listing_type = state.get("listing_type", "").title()
81
  requirements = state.get("requirements", "")
82
-
83
  desc = f"Spacious {bedrooms}-bedroom, {bathrooms}-bathroom {listing_type} "
84
  desc += f"located in {location}. "
 
85
  if price:
86
  desc += f"Priced at {price:,} {price_type}. "
 
87
  if amenities:
88
  amenities_str = ", ".join(amenities)
89
  desc += f"Fully furnished with modern amenities including {amenities_str}. "
 
90
  if requirements:
91
  desc += f"Requirements: {requirements}. "
 
92
  desc += "Perfect for large families or shared accommodation."
93
  return desc
94
 
95
- def _get_current_message(state: Dict) -> str:
96
- """Extract ONLY the current user message."""
97
- full = state["messages"][-1]["content"]
98
- if "Now the user says:" in full:
99
- return full.split("Now the user says:")[-1].strip()
100
- return full.strip()
101
-
102
- # ============================================================
103
- # Draft Node
104
- # ============================================================
105
-
106
- @handle_errors(default_return=None)
107
  async def draft_node(state: Dict) -> Dict:
108
  """
109
- LangGraph node: Generate draft listing preview
110
-
111
- Features:
112
- - Prevents regeneration when preview active
113
- - ML validation of all fields
114
- - Professional title/description generation
115
- - Amenity icon formatting
116
- - Error handling with graceful fallback
117
- - Full observability and logging
118
  """
119
 
120
- status = state.get("status")
121
-
122
- with trace_operation(
123
- "draft_node",
124
- {
125
- "status": status,
126
- "intent": state.get("intent"),
127
- "has_draft": state.get("draft_preview") is not None,
128
- }
129
- ):
130
- # ===== CRITICAL: Exit immediately if preview already active =====
131
- if status in {"preview_shown", "waiting_for_images"}:
132
- logger.info("🛑 DRAFT NODE SKIPPED: Preview already active")
133
- return state
134
 
135
- # ===== Only proceed if status is EXACTLY "draft_ready" =====
136
- if status != "draft_ready":
137
- logger.info(f"🛑 DRAFT NODE SKIPPED: status={status}, not draft_ready")
138
- return state
 
 
139
 
140
- # ===== Only proceed if intent is "list" =====
141
- if state.get("intent") != "list":
142
- logger.info("🛑 DRAFT NODE SKIPPED: intent is not 'list'")
 
 
 
 
 
 
 
 
 
143
  return state
 
 
 
 
 
 
 
 
 
 
 
 
 
144
 
145
- # ===== If we get here, generate the draft =====
146
- logger.info("✅ DRAFT NODE RUNNING: Generating draft preview")
147
-
148
- user_id = state.get("user_id")
149
-
150
- # ===== ML VALIDATION =====
151
- with trace_operation("ml_validation"):
152
- try:
153
- validation = ml_extractor.validate_all_fields(state, user_id)
154
-
155
- if not validation["all_valid"]:
156
- issues_text = "\n".join([f"❌ {issue}" for issue in validation["issues"]])
157
- state["ai_reply"] = f"""I found some issues with your listing:
158
 
159
  {issues_text}
160
 
161
  Let me ask again - could you clarify these fields?"""
162
- state["status"] = "collecting"
163
- state["missing_fields"] = [
164
- field for field, result in validation["field_validations"].items()
165
- if not result["is_valid"]
166
- ]
167
- logger.warning(f"🚫 Validation failed", extra={"issues": validation["issues"]})
168
- return state
169
-
170
- logger.info(f"✅ All fields passed validation")
171
 
172
- except Exception as e:
173
- logger.error(f"❌ ML validation error: {e}", exc_info=True)
174
- state["ai_reply"] = "Sorry, I couldn't validate your listing. Please try again."
175
- state["status"] = "error"
176
- return state
177
-
178
- # ===== DRAFT GENERATION =====
179
- with trace_operation("draft_generation"):
180
- try:
181
- # Generate components
182
- title = _generate_title(state)
183
- description = _generate_description(state)
184
- amenities_with_icons = _add_amenity_icons(state.get("amenities", []))
185
- images = (
186
- state.get("draft", {}).get("images", [])
187
- if isinstance(state.get("draft"), dict)
188
- else []
189
- )
190
-
191
- # Build draft preview
192
- draft_preview = {
193
- "title": title,
194
- "description": description,
195
- "location": state.get("location", "").title(),
196
- "bedrooms": state.get("bedrooms"),
197
- "bathrooms": state.get("bathrooms"),
198
- "price": state.get("price"),
199
- "price_type": state.get("price_type"),
200
- "listing_type": state.get("listing_type"),
201
- "amenities": state.get("amenities", []),
202
- "amenities_with_icons": amenities_with_icons,
203
- "requirements": state.get("requirements"),
204
- "currency": state.get("currency", "XOF"),
205
- "images": images,
206
- "field_confidences": validation.get("field_validations", {}),
207
- }
208
-
209
- logger.info(
210
- f"🎯 Draft generated",
211
- extra={
212
- "title": title,
213
- "images": len(images),
214
- "amenities": len(state.get("amenities", [])),
215
- }
216
- )
217
 
218
- except Exception as e:
219
- logger.error(f"❌ Failed to generate draft: {e}", exc_info=True)
220
- state["ai_reply"] = "Sorry, I couldn't generate your draft. Please try again."
221
- state["status"] = "error"
222
- return state
223
 
224
- # ===== BUILD PREVIEW MESSAGE =====
225
- with trace_operation("build_preview_message"):
226
- try:
227
- images_section = ""
228
- if images:
229
- images_section = f"\n📷 Images: {len(images)} uploaded\n"
230
- for idx, img_url in enumerate(images[:3], 1):
231
- images_section += f" {idx}. {img_url[:60]}...\n"
232
- if len(images) > 3:
233
- images_section += f" ... and {len(images) - 3} more\n"
234
-
235
- preview_text = f"""
236
- ┌─────────────────────────────────────────────────────────────────────────┐
237
- 🏠 LISTING PREVIEW
238
- └─────────────────────────────────────────────────────────────────────────┘
239
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240
  **{draft_preview['title']}**
241
 
242
- 📍 Location: {draft_preview['location']}
243
- 🛏 Bedrooms: {draft_preview['bedrooms']}
244
- 🚿 Bathrooms: {draft_preview['bathrooms']}
245
- 💰 Price: {draft_preview['price']:,} {draft_preview['price_type']} ({draft_preview['currency']})
246
 
247
  {draft_preview['description']}
248
 
249
- Amenities: {draft_preview['amenities_with_icons'] if draft_preview['amenities_with_icons'] else 'None specified'}
250
  {images_section}
251
- └─────────────────────────────────────────────────────────────────────────┘
252
  """
253
-
254
- if not images:
255
- preview_text += """
256
- 📸 Upload property images to make your listing more attractive!
 
257
 
258
  Then say **publish** to make it live!
259
  """
260
- state["status"] = "waiting_for_images"
261
- logger.info("⏳ Waiting for images")
262
- else:
263
- preview_text += """
264
- ✅ Perfect! Say **publish** to make your listing live!
265
  """
266
- state["status"] = "preview_shown"
267
- logger.info("✅ Preview ready for publishing")
268
-
269
- state["draft_preview"] = draft_preview
270
- state["ai_reply"] = preview_text
271
-
272
- logger.info(
273
- f"✅ Draft node completed",
274
- extra={"status": state["status"]}
275
- )
276
-
277
- return state
278
-
279
- except Exception as e:
280
- logger.error(f"❌ Failed to build preview: {e}", exc_info=True)
281
- state["ai_reply"] = "Sorry, an error occurred preparing your listing. Please try again."
282
- state["status"] = "error"
283
- return state
 
1
+ # app/ai/nodes/draft_node.py - Fix location persistence + add amenity icons
 
2
  import datetime
3
  from typing import Dict
4
+ from bson import ObjectId
5
  from app.database import get_db
6
+ from app.ai.state import ListingDraft
7
+ from structlog import get_logger
8
+ from openai import AsyncOpenAI
9
+ from app.config import settings
10
  from app.ml.models.ml_listing_extractor import get_ml_extractor
11
 
12
+ logger = get_logger(__name__)
13
 
14
+ client = AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
15
  ml_extractor = get_ml_extractor()
16
 
17
+ # ========== AMENITY ICONS MAPPING ==========
 
 
 
18
  AMENITY_ICONS = {
19
+ "wifi": "📶",
20
+ "parking": "🅿️",
21
+ "furnished": "🛋️",
22
+ "washing machine": "🧼",
23
+ "washing": "🧼",
24
+ "dryer": "🔥",
25
+ "balcony": "🏞️",
26
+ "pool": "🏊",
27
+ "gym": "💪",
28
+ "garden": "🌿",
29
+ "air conditioning": "❄️",
30
+ "ac": "❄️",
31
+ "kitchen": "🍳",
 
 
32
  }
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  def _add_amenity_icons(amenities: list) -> str:
35
  """Convert amenities list to string with icons."""
36
  if not amenities:
37
  return ""
38
+
39
  icons_text = []
40
  for amenity in amenities:
41
  amenity_lower = amenity.lower().strip()
42
+ icon = AMENITY_ICONS.get(amenity_lower, "✓")
43
  icons_text.append(f"{icon} {amenity.title()}")
44
+
45
  return " | ".join(icons_text)
46
 
47
  def _generate_title(state: Dict) -> str:
48
  """Generate professional title from listing data."""
49
  bedrooms = state.get("bedrooms", "")
50
+ # ✅ CRITICAL: Use current location from state, not regenerated
51
  location = state.get("location", "").title()
52
  listing_type = state.get("listing_type", "").title()
53
+
54
  if bedrooms and location:
55
  return f"{bedrooms}-Bedroom {listing_type} in {location}"
56
  return f"Property in {location}"
 
59
  """Generate professional description from listing data."""
60
  bedrooms = state.get("bedrooms", "")
61
  bathrooms = state.get("bathrooms", "")
62
+ # ✅ CRITICAL: Use current location from state, not regenerated
63
  location = state.get("location", "").title()
64
  amenities = state.get("amenities", [])
65
  price = state.get("price", "")
66
  price_type = state.get("price_type", "").title()
67
  listing_type = state.get("listing_type", "").title()
68
  requirements = state.get("requirements", "")
69
+
70
  desc = f"Spacious {bedrooms}-bedroom, {bathrooms}-bathroom {listing_type} "
71
  desc += f"located in {location}. "
72
+
73
  if price:
74
  desc += f"Priced at {price:,} {price_type}. "
75
+
76
  if amenities:
77
  amenities_str = ", ".join(amenities)
78
  desc += f"Fully furnished with modern amenities including {amenities_str}. "
79
+
80
  if requirements:
81
  desc += f"Requirements: {requirements}. "
82
+
83
  desc += "Perfect for large families or shared accommodation."
84
  return desc
85
 
86
+ # ---------- node ----------
 
 
 
 
 
 
 
 
 
 
 
87
async def draft_node(state: Dict) -> Dict:
    """
    LangGraph node: collect optional fields, validate, and build the draft preview.

    Flow (driven by state["status"]):
    - "checking_optional": if amenities/requirements are missing, prompt for
      them and set status to "collecting_optional"; otherwise promote the
      status to "draft_ready" and fall through.
    - "draft_ready" (and intent == "list"): run ML validation on all fields,
      then generate the preview. Validation failure sends status back to
      "collecting" with the failing fields listed in state["missing_fields"].
    - On success, sets state["draft_preview"], state["ai_reply"], and status
      "waiting_for_images" (no images yet) or "preview_shown" (images present).
    Any other status/intent combination returns state unchanged.
    """

    # ✅ NEW: Check if amenities/requirements are missing
    if state.get("status") == "checking_optional":
        amenities = state.get("amenities", [])
        requirements = state.get("requirements")

        # Ask about missing optional fields
        missing_optional = []
        if not amenities:
            missing_optional.append("amenities")
        if not requirements:
            missing_optional.append("requirements")

        if missing_optional:
            # Prompt the user once for all missing optional fields and stop
            # this turn here; the next message re-enters the collection flow.
            state["status"] = "collecting_optional"
            state["missing_fields"] = missing_optional

            questions = []
            if "amenities" in missing_optional:
                questions.append("Any amenities? (e.g., wifi, parking, balcony, pool, furnished, kitchen, dryer, garden, etc.)")
            if "requirements" in missing_optional:
                questions.append("Any special requirements for renters?")

            state["ai_reply"] = "Just a couple more things...\n\n" + "\n".join([f"• {q}" for q in questions])
            logger.info("ℹ️ Asking for optional fields", missing=missing_optional)
            return state
        else:
            # All optional fields provided, move to draft
            state["status"] = "draft_ready"

    # Only process if listing creation with all fields ready
    if state.get("intent") != "list" or state.get("status") != "draft_ready":
        return state

    user_id = state.get("user_id")

    # ✅ ML VALIDATION before drafting
    try:
        validation = ml_extractor.validate_all_fields(state, user_id)

        if not validation["all_valid"]:
            # Fields failed validation, go back to collecting
            issues_text = "\n".join([f"❌ {issue}" for issue in validation["issues"]])
            state["ai_reply"] = f"""I found some issues with your listing:

{issues_text}

Let me ask again - could you clarify these fields?"""

            state["status"] = "collecting"
            # Re-populate missing fields based on validation
            state["missing_fields"] = [
                field for field, result in validation["field_validations"].items()
                if not result["is_valid"]
            ]

            logger.warning("🚫 Fields failed ML validation", issues=validation["issues"])
            return state

        logger.info("✅ All fields passed ML validation", user_id=user_id)

    except Exception as e:
        # Validator itself blew up — surface a generic apology rather than crash.
        logger.error("❌ ML validation error", exc_info=e)
        state["ai_reply"] = "Sorry, I couldn't validate your listing. Please try again."
        state["status"] = "error"
        return state

    # Generate title and description
    # ✅ CRITICAL: These use state's current location (corrected by user)
    title = _generate_title(state)
    description = _generate_description(state)
    amenities_with_icons = _add_amenity_icons(state.get("amenities", []))

    # ✅ Get images from state (if any were uploaded)
    # NOTE(review): state["draft"] may be a ListingDraft object elsewhere —
    # only a dict-shaped draft contributes images here; confirm intended.
    images = state.get("draft", {}).get("images", []) if isinstance(state.get("draft"), dict) else []

    # Build draft preview with all fields including images and icons
    draft_preview = {
        "title": title,
        "description": description,
        "location": state.get("location", "").title(),  # ✅ Use current location
        "bedrooms": state.get("bedrooms"),
        "bathrooms": state.get("bathrooms"),
        "price": state.get("price"),
        "price_type": state.get("price_type"),
        "listing_type": state.get("listing_type"),
        "amenities": state.get("amenities", []),
        "amenities_with_icons": amenities_with_icons,  # ✅ NEW: Add icons
        "requirements": state.get("requirements"),
        "currency": state.get("currency", "XOF"),
        "images": images,  # ✅ INCLUDE IMAGES
        "field_confidences": validation["field_validations"],
    }

    logger.info("🎯 Draft preview generated",
                title=title,
                location=state.get("location"),  # ✅ Log actual location
                image_count=len(images),
                amenities=state.get("amenities", []))

    # Build nice preview message for user (at most 3 image URLs shown)
    images_section = ""
    if images:
        images_section = f"\n📷 Images: {len(images)} uploaded\n"
        for idx, img_url in enumerate(images[:3], 1):
            images_section += f" {idx}. {img_url[:60]}...\n"
        if len(images) > 3:
            images_section += f" ... and {len(images) - 3} more\n"

    # NOTE(review): price formatted with ',' — assumes a numeric price; a
    # string price would raise ValueError here. TODO confirm upstream type.
    preview_text = f"""
┌──────────────────────────────────────────────────────────────────────────────
🏠LISTING PREVIEW
└──────────────────────────────────────────────────────────────────────────────
**{draft_preview['title']}**

📍 Location: {draft_preview['location']}
🛏️ Bedrooms: {draft_preview['bedrooms']}
🚿 Bathrooms: {draft_preview['bathrooms']}
💰 Price: {draft_preview['price']:,} {draft_preview['price_type']} ({draft_preview['currency']})

{draft_preview['description']}

✨ Amenities: {draft_preview['amenities_with_icons'] if draft_preview['amenities_with_icons'] else 'None specified'}
{images_section}
└──────────────────────────────────────────────────────────────────────────────
"""

    # ✅ Check if images uploaded, ask if not
    if not images:
        preview_text += """
📸 Upload property images to make your listing more attractive!

Then say **publish** to make it live!
"""
        state["status"] = "waiting_for_images"  # ✅ Wait for images
    else:
        preview_text += """
✅ Perfect! Say **publish** to make your listing live!
"""
        state["status"] = "preview_shown"  # ✅ Ready to publish

    state["draft_preview"] = draft_preview
    state["ai_reply"] = preview_text

    return state
 
 
 
 
 
 
 
 
 
 
 
 
app/ai/nodes/image_node.py CHANGED
@@ -30,7 +30,7 @@ async def image_node(state: Dict) -> Dict:
30
  binary = base64.b64decode(base64_data)
31
  except Exception as e:
32
  logger.warning("Bad base64 image", exc_info=e)
33
- state["ai_reply"] = " Invalid image data. Please try again."
34
  return state
35
 
36
  headers = {"Authorization": f"Bearer {settings.CF_API_TOKEN}"}
@@ -46,7 +46,7 @@ async def image_node(state: Dict) -> Dict:
46
  public_url = f"https://imagedelivery.net/{data['result']['id']}/public"
47
  except Exception as e:
48
  logger.error("Cloudflare upload failed", exc_info=e)
49
- state["ai_reply"] = " Could not upload image. Please try again."
50
  return state
51
 
52
  # append URL to draft
@@ -55,9 +55,9 @@ async def image_node(state: Dict) -> Dict:
55
  if not hasattr(draft, "images"):
56
  draft.images = []
57
  draft.images.append(public_url)
58
- state["ai_reply"] = f"📸 Picture uploaded! Add more or say **publish** when ready.\n{public_url}"
59
  else:
60
- state["ai_reply"] = "📸 Picture uploaded! Continue describing the property."
61
 
62
  logger.info("Image uploaded to Cloudflare", url=public_url)
63
  return state
 
30
  binary = base64.b64decode(base64_data)
31
  except Exception as e:
32
  logger.warning("Bad base64 image", exc_info=e)
33
+ state["ai_reply"] = "❌ Invalid image data. Please try again."
34
  return state
35
 
36
  headers = {"Authorization": f"Bearer {settings.CF_API_TOKEN}"}
 
46
  public_url = f"https://imagedelivery.net/{data['result']['id']}/public"
47
  except Exception as e:
48
  logger.error("Cloudflare upload failed", exc_info=e)
49
+ state["ai_reply"] = "❌ Could not upload image. Please try again."
50
  return state
51
 
52
  # append URL to draft
 
55
  if not hasattr(draft, "images"):
56
  draft.images = []
57
  draft.images.append(public_url)
58
+ state["ai_reply"] = f"📸 Picture uploaded! Add more or say **publish** when ready.\n{public_url}"
59
  else:
60
+ state["ai_reply"] = "📸 Picture uploaded! Continue describing the property."
61
 
62
  logger.info("Image uploaded to Cloudflare", url=public_url)
63
  return state
app/ai/nodes/intent_node.py CHANGED
@@ -1,197 +1,390 @@
1
- # app/ai/nodes/intent_node.py FINAL: chat until clear "list", then show example
2
- import json, re, logging
3
- from typing import Dict
4
- from app.core.llm_router import call_llm_smart
5
- from app.core.context_manager import get_context_manager
6
- from app.core.error_handling import async_retry, RetryStrategy, trace_operation
 
 
7
  from app.ml.models.ml_listing_extractor import get_ml_extractor
8
- from app.ml.models.user_role_context_handler import RentalModel
9
- from app.ai.services.dynamic_role_manager import get_dynamic_role_manager
10
 
11
- logger = logging.getLogger(__name__)
 
 
 
 
 
 
 
 
12
  ml_extractor = get_ml_extractor()
13
- role_manager = get_dynamic_role_manager()
14
 
15
- # ------------------------------------------------------------------
16
- # helpers
17
- # ------------------------------------------------------------------
18
- def _clean_json(raw: str) -> str:
19
- return re.sub(r'```json\s*', '', re.sub(r'```\s*', '', raw)).strip()
20
 
21
- def _get_current_message(state: Dict) -> str:
22
- full = state["messages"][-1]["content"]
23
- return full.split("Now the user says:")[-1].strip() if "Now the user says:" in full else full.strip()
24
 
25
- def _is_clear_list_intent(msg: str) -> bool:
26
- """Detect explicit list trigger after casual chat."""
27
- return bool(re.search(r'\b(list\s+a?\s*property|i\s+want\s+to\s+list)\b', msg.lower()))
 
 
28
 
29
- def _listing_example() -> str:
30
- return """
31
- 📋 **Here’s how it works:**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
- **Example 1 Rental:**
34
- You: *"I have a 3-bed apartment in Lagos, 150,000 per month"*
35
- Me: *"Got it! 3 bed, monthly. What amenities? (WiFi, parking, etc.)"*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- **Example 2 Short-stay:**
38
- You: *"My place in Lagos, 2 bedrooms, 5,000 per night"*
39
- Me: *"Perfect! Short-stay in Lagos. What amenities?"*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
- **Example 3 Sale:**
42
- You: *"Selling a 4-bed house in Ikoyi for 20 million"*
43
- Me: *"Got it! 4-bed sale. What amenities?"*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
- Now tell me about **your** property! 👇
46
- """
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- # ------------------------------------------------------------------
49
- # core node
50
- # ------------------------------------------------------------------
51
- @async_retry(strategy=RetryStrategy.MODERATE, operation_name="intent_node")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  async def intent_node(state: Dict) -> Dict:
53
- current_msg = _get_current_message(state).lower()
54
- user_id = state.get("user_id")
55
- user_role = state.get("user_role", "renter")
56
-
57
- # ===== COMMAND MODE (draft on screen) =====
58
- if state.get("draft_preview"):
59
- return _handle_draft_commands(state, current_msg)
60
-
61
- # ===== STEP 1: CASUAL CHAT UNTIL CLEAR "LIST" =====
62
- if not state.get("status") and not _is_clear_list_intent(current_msg):
63
- # casual chat
64
- state["intent"] = "chat"
65
- state["ai_reply"] = "👋 Hey! Want to list a property or search for one? Just say *'I want to list'* when ready."
66
- return state
67
-
68
- # ===== STEP 2: FIRST CLEAR "LIST" → SHOW EXAMPLE ONLY =====
69
- if _is_clear_list_intent(current_msg) and not state.get("status"):
70
- state["intent"] = "list"
71
- state["status"] = "collecting"
72
- state["ai_reply"] = _listing_example()
73
- return state
74
-
75
- # ===== STEP 3: EXTRACT / VALIDATE / DRAFT FLOW =====
76
  human_msg = state["messages"][-1]["content"]
77
 
78
- with trace_operation("intent_node_ml", {"user_id": user_id}):
79
- # 1. LLM extraction
80
- try:
81
- messages = await get_context_manager().manage_context([
82
- {"role": "system", "content": "You are Aida, real-estate AI. Extract JSON only when property details appear."},
83
- {"role": "user", "content": human_msg}
84
- ])
85
- text, _, _ = await call_llm_smart(messages, temperature=0.3, max_tokens=800)
86
- data = json.loads(_clean_json(text))
87
- except Exception:
88
- state["ai_reply"] = "Sorry, I didn’t catch that. Could you rephrase?"
89
- state["intent"] = "chat"
90
- return state
91
-
92
- # 2. merge fields
93
- for k in ("location", "bedrooms", "bathrooms", "price", "price_type", "listing_type",
94
- "amenities", "requirements"):
95
- if data.get(k) is not None:
96
- state[k] = data[k]
97
-
98
- # 3. strict listing-type rental-model
99
- listing_type = state.get("listing_type")
100
- if listing_type == "rent":
101
- rental_model = RentalModel.AFRICAN_RENTAL
102
- elif listing_type == "short-stay":
103
- rental_model = RentalModel.AIRBNB
104
- elif listing_type == "roommate":
105
- rental_model = RentalModel.ROOMMATE
106
- elif listing_type == "sale":
107
- rental_model = "sale"
108
- else:
109
- rental_model = RentalModel.MIXED
110
- state["rental_model"] = rental_model.value if hasattr(rental_model, "value") else rental_model
111
-
112
- # 4. ML refinement
113
- if state.get("location"):
114
- city, details = await ml_extractor.extract_location_from_address(state["location"])
115
- if city:
116
- state["location"] = city
117
- state["location_details"] = details
118
-
119
- # 5. ML validation
120
- validation = ml_extractor.validate_all_fields(state, user_id)
121
- state["field_validations"] = validation["field_validations"]
122
-
123
- # 6. currency
124
- currency, city, conf = await ml_extractor.infer_currency(state)
125
- if currency:
126
- state["currency"] = currency
127
- state["currency_confidence"] = conf
128
-
129
- # 7. build reply
130
- if validation["all_valid"]:
131
- state["status"] = "draft_ready"
132
- state["ai_reply"] = _build_preview_prompt(state)
133
- else:
134
- missing = validation["issues"]
135
- state["missing_fields"] = missing
136
- state["ai_reply"] = _ask_next_missing(missing, state)
137
-
138
- state["intent"] = "list"
139
- return state
140
-
141
- # ------------------------------------------------------------------
142
- # sub-helpers (unchanged)
143
- # ------------------------------------------------------------------
144
- def _handle_draft_commands(state: Dict, msg: str) -> Dict:
145
- if any(w in msg for w in {"publish", "go live", "post", "confirm", "yes", "ok"}):
146
- state["intent"] = "publish"
147
- state["ai_reply"] = ""
148
- return state
149
- if any(w in msg for w in {"edit", "change", "update", "fix"}):
150
- state["intent"] = "edit"
151
- state["edit_field"] = re.sub(r"\b(edit|change|update|fix)\b", "", msg).strip() or "general"
152
- state["ai_reply"] = f"Sure! What would you like to change about **{state['edit_field']}**?"
153
- return state
154
- if any(w in msg for w in {"discard", "cancel", "delete", "no"}):
155
- state["intent"] = "discard"
156
- state["draft_preview"] = None
157
- state["status"] = None
158
- state["missing_fields"] = []
159
- state["ai_reply"] = "Draft discarded! What would you like to do next?"
160
- return state
161
- if "search" in msg:
162
- state["intent"] = "search"
163
- return state
164
- state["intent"] = "chat"
165
- state["ai_reply"] = "Say **publish** to list it, **edit** to change something, or **discard** to start over."
166
- return state
167
-
168
- def _build_preview_prompt(state: Dict) -> str:
169
- title = f"{state.get('bedrooms', '')}-Bedroom {state.get('listing_type', '').title()} in {state.get('location', '').title()}"
170
- amenities = ", ".join(state.get("amenities", [])) or "None specified"
171
- price = f"{int(state.get('price', 0)):,} {state.get('price_type', '')}"
172
- return f"""
173
- ✨ **LISTING PREVIEW**
174
-
175
- 🏠 **{title}**
176
- 📍 Location: {state.get('location', '').title()}
177
- 🛏️ Bedrooms: {state.get('bedrooms')}
178
- 🚿 Bathrooms: {state.get('bathrooms')}
179
- 💰 Price: {price} ({state.get('currency', 'XOF')})
180
- 🎯 Amenities: {amenities}
181
-
182
- **What’s next?**
183
- - **publish** – Go live!
184
- - **edit [field]** – Change something
185
- - **discard** – Start over
186
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
187
 
188
- def _ask_next_missing(missing: list, state: Dict) -> str:
189
- questions = {
190
- "location": "📍 Where is the property located?",
191
- "bedrooms": "🛏️ How many bedrooms?",
192
- "bathrooms": "🚿 How many bathrooms?",
193
- "price": "💰 What’s the price/rent amount?",
194
- "price_type": "Is it per month, per night, per year, etc.?"
195
- }
196
- next_f = missing[0] if missing else "amenities"
197
- return "Issues found:\n" + "\n".join(f"❌ {m}" for m in missing) + f"\n\n{questions.get(next_f, 'Tell me more')}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app/ai/nodes/intent_node.py - FIXED: Simple example, collect amenities/requirements
2
+ import json
3
+ import re
4
+ from typing import Dict, List
5
+ from tenacity import retry, stop_after_attempt, wait_exponential
6
+ from openai import AsyncOpenAI
7
+ from structlog import get_logger
8
+ from app.config import settings
9
  from app.ml.models.ml_listing_extractor import get_ml_extractor
 
 
10
 
11
+ logger = get_logger(__name__)
12
+
13
+ MODEL = "deepseek-chat"
14
+ MAX_TOKENS = 600
15
+ TEMP = 0
16
+
17
+ client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY,
18
+ base_url=settings.DEEPSEEK_BASE_URL)
19
+
20
  ml_extractor = get_ml_extractor()
 
21
 
22
def _load_system() -> str:
    """Read the Aida system-prompt template from disk and return it verbatim."""
    with open("app/ai/prompts/system_prompt.txt", encoding="utf-8") as prompt_file:
        return prompt_file.read()
 
 
25
 
26
+ SYSTEM_PROMPT = _load_system()
 
 
27
 
28
+ def _clean_json(raw: str) -> str:
29
+ """Remove markdown code blocks from JSON string."""
30
+ cleaned = re.sub(r'```json\s*', '', raw)
31
+ cleaned = re.sub(r'```\s*', '', cleaned)
32
+ return cleaned.strip()
33
 
34
+ def _normalize_locations(location: str) -> str:
35
+ """Normalize location names (handle typos)."""
36
+ if not location:
37
+ return None
38
+
39
+ loc_lower = location.lower().strip()
40
+
41
+ location_map = {
42
+ "lago": "lagos",
43
+ "lgs": "lagos",
44
+ "lag": "lagos",
45
+ "cotnu": "cotonou",
46
+ "cotonus": "cotonou",
47
+ "cotou": "cotonou",
48
+ "akpakpa": "akpakpa",
49
+ "nairobi": "nairobi",
50
+ "nbi": "nairobi",
51
+ "accra": "accra",
52
+ "acc": "accra",
53
+ "joburg": "johannesburg",
54
+ "jozi": "johannesburg",
55
+ "london": "london",
56
+ "paris": "paris",
57
+ }
58
+
59
+ return location_map.get(loc_lower, location.lower())
60
 
61
+ def _normalize_amenities(amenities: list) -> list:
62
+ """Normalize amenity names (handle typos)."""
63
+ if not amenities:
64
+ return []
65
+
66
+ amenity_map = {
67
+ "balcno": "balcony",
68
+ "balconny": "balcony",
69
+ "parkng": "parking",
70
+ "park": "parking",
71
+ "wifi": "wifi",
72
+ "furnisd": "furnished",
73
+ "furnishd": "furnished",
74
+ "furnish": "furnished",
75
+ "ac": "air conditioning",
76
+ "air cond": "air conditioning",
77
+ "aircond": "air conditioning",
78
+ "gym": "gym",
79
+ "pool": "pool",
80
+ "swiming": "pool",
81
+ "kitchen": "kitchen",
82
+ "kitchn": "kitchen",
83
+ "dryer": "dryer",
84
+ "garden": "garden",
85
+ "gdn": "garden",
86
+ }
87
+
88
+ normalized = []
89
+ for amenity in amenities:
90
+ if not amenity:
91
+ continue
92
+ am_lower = amenity.lower().strip()
93
+ normalized_am = amenity_map.get(am_lower, am_lower)
94
+ if normalized_am and normalized_am not in normalized:
95
+ normalized.append(normalized_am)
96
+
97
+ return normalized
98
 
99
+ def _normalize_price_type(price_type: str) -> str:
100
+ """Normalize price_type (handle typos)."""
101
+ if not price_type:
102
+ return None
103
+
104
+ pt_lower = price_type.lower().strip()
105
+
106
+ price_type_map = {
107
+ "montly": "monthly",
108
+ "monthyl": "monthly",
109
+ "mth": "monthly",
110
+ "month": "monthly",
111
+ "nightl": "nightly",
112
+ "nightly": "nightly",
113
+ "night": "nightly",
114
+ "daily": "daily",
115
+ "day": "daily",
116
+ "weakly": "weekly",
117
+ "weakyl": "weekly",
118
+ "week": "weekly",
119
+ "yr": "yearly",
120
+ "year": "yearly",
121
+ "yearly": "yearly",
122
+ "annum": "yearly",
123
+ }
124
+
125
+ return price_type_map.get(pt_lower, pt_lower)
126
 
127
+ def _normalize_listing_type(listing_type: str) -> str:
128
+ """Normalize listing_type (handle typos)."""
129
+ if not listing_type:
130
+ return None
131
+
132
+ lt_lower = listing_type.lower().strip()
133
+
134
+ listing_type_map = {
135
+ "rent": "rent",
136
+ "for rent": "rent",
137
+ "rental": "rent",
138
+ "short stay": "short-stay",
139
+ "short-stay": "short-stay",
140
+ "shortsta": "short-stay",
141
+ "short stya": "short-stay",
142
+ "stayover": "short-stay",
143
+ "roommate": "roommate",
144
+ "roommat": "roommate",
145
+ "sharing": "roommate",
146
+ "flatmate": "roommate",
147
+ "shareflat": "roommate",
148
+ "sale": "sale",
149
+ "for sale": "sale",
150
+ "selling": "sale",
151
+ "sell": "sale",
152
+ }
153
+
154
+ return listing_type_map.get(lt_lower, lt_lower)
155
 
156
+ def _get_missing_fields(data: Dict) -> List[str]:
157
+ """Identify which REQUIRED fields are missing for listing creation."""
158
+ if data.get("intent") != "list":
159
+ return []
160
+
161
+ required = ["location", "bedrooms", "bathrooms", "price", "listing_type", "price_type"]
162
+ missing = []
163
+
164
+ for field in required:
165
+ value = data.get(field)
166
+ if value is None or value == "" or value == 0:
167
+ missing.append(field)
168
+
169
+ return missing
170
 
171
+ def _get_next_question(missing_fields: List[str]) -> str:
172
+ """Get the next question based on missing fields."""
173
+ if not missing_fields:
174
+ return None
175
+
176
+ next_field = missing_fields[0]
177
+
178
+ questions = {
179
+ "location": "Where is the property located? (city/area)",
180
+ "bedrooms": "How many bedrooms?",
181
+ "bathrooms": "How many bathrooms?",
182
+ "price": "What's the price/rent amount?",
183
+ "listing_type": "Is it for rent, short-stay, sale, or roommate?",
184
+ "price_type": "Is it monthly, nightly, or yearly?"
185
+ }
186
+
187
+ return questions.get(next_field, "What else should I know?")
188
+
189
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
async def intent_node(state: Dict) -> Dict:
    """Classify the user's message and extract listing/search fields.

    Calls the DeepSeek chat model with the Aida system prompt, parses the
    JSON it returns, runs ML inference/validation for listing intents,
    normalizes the extracted values, and writes everything back into
    ``state`` (updated in place and returned).

    Parameters
    ----------
    state : Dict
        LangGraph chat state; must contain ``user_role`` and ``messages``.

    Returns
    -------
    Dict
        The same state object, updated in place.
    """
    user_role = state["user_role"]
    user_id = state.get("user_id")
    human_msg = state["messages"][-1]["content"]

    prompt = SYSTEM_PROMPT.replace("{user_role}", user_role)
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": human_msg},
    ]

    logger.info("🤖 Aida intent call", user_role=user_role, msg=human_msg)
    resp = await client.chat.completions.create(
        model=MODEL,
        messages=messages,
        temperature=TEMP,
        max_tokens=MAX_TOKENS,
    )
    raw = resp.choices[0].message.content.strip()
    logger.debug("🤖 Aida raw response", raw=raw)

    # The model is instructed to answer in JSON (possibly markdown-fenced);
    # fall back to a polite "rephrase" reply on parse failure.
    try:
        data = json.loads(_clean_json(raw))
    except json.JSONDecodeError as e:
        logger.error("❌ Aida bad json", raw=raw, exc_info=e)
        data = {
            "allowed": False,
            "ai_reply": "Sorry, I didn't understand that. Could you rephrase?",
        }

    # ---- LISTING intent: progressive field collection -------------------
    if data.get("intent") == "list":
        data["allowed"] = True
        missing = _get_missing_fields(data)

        intro_and_example = """
To list a property, here's how it's done:

📝 **Example:**
"I have a 3-bedroom, 2-bathroom property in Cotonou for rent at 50,000 XOF per month. It has a balcony, kitchen, and dryer. It's fully furnished. Renters must pay a 3-month deposit."

📸 You can also upload property photos to make it more attractive!
"""

        # Show the intro/example only on the FIRST listing message,
        # i.e. before the user has supplied any concrete details.
        should_show_example = not any([
            data.get("bedrooms"),
            data.get("bathrooms"),
            data.get("price"),
            data.get("amenities"),
            data.get("requirements"),
        ])

        if missing:
            data["status"] = "collecting"
            data["missing_fields"] = missing
            data["next_question"] = _get_next_question(missing)
            if should_show_example:
                data["ai_reply"] = intro_and_example
            else:
                data["ai_reply"] = _get_next_question(missing)
        else:
            data["status"] = "checking_optional"  # required fields complete
            data["missing_fields"] = []
            if not should_show_example:
                data["ai_reply"] = "Perfect! Any amenities or special requirements for renters?"
            else:
                data["ai_reply"] = intro_and_example

    # SEARCH is always allowed here (role_gate_node does the real check).
    if data.get("intent") == "search":
        data["allowed"] = True

    # ---- smart inference + ML validation (listing intent only) ----------
    if data.get("intent") == "list":
        location_input = data.get("location")

        # 1) Extract the city from a detailed address via Nominatim.
        if location_input:
            try:
                city, location_info = await ml_extractor.extract_location_from_address(location_input)
                if city:
                    data["location"] = city
                    data["location_details"] = location_info
                    logger.info(f"✅ Extracted city from address: {location_input} → {city}")
            except Exception as e:
                logger.warning(f"⚠️ Failed to extract location: {e}")

        # 2) Infer listing_type from the user's role + message keywords.
        try:
            inferred_type, listing_confidence = ml_extractor.infer_listing_type(
                data,
                user_role=user_role,
                user_message=human_msg,
            )
            if inferred_type:
                data["listing_type"] = inferred_type
                data["listing_confidence"] = listing_confidence
                logger.info(f"✅ Inferred listing_type: {inferred_type} (confidence: {listing_confidence})")
        except Exception as e:
            logger.warning(f"⚠️ Failed to infer listing_type: {e}")

        # 3) Infer currency from the location (Nominatim + REST Countries).
        try:
            currency, extracted_city, currency_confidence = await ml_extractor.infer_currency(data)
            if currency:
                data["currency"] = currency
                data["currency_confidence"] = currency_confidence
                if extracted_city:
                    data["location"] = extracted_city
                logger.info(f"✅ Inferred currency: {currency} (confidence: {currency_confidence})")
        except Exception as e:
            logger.warning(f"⚠️ Failed to infer currency: {e}")
            data["currency"] = data.get("currency", "XOF")

        # 4) ML validation of the extracted fields.
        try:
            validation_issues = []
            validation_suggestions = []
            field_validations = {}

            for field in ["location", "bedrooms", "bathrooms", "price", "price_type"]:
                value = data.get(field)
                if value is not None:
                    result = ml_extractor.validate_field(field, value, human_msg, user_id)
                    field_validations[field] = result

                    if not result["is_valid"]:
                        validation_issues.append(f"❌ {field}: {result['suggestion']}")
                        logger.warning(f"Validation failed for {field}", suggestion=result["suggestion"])
                    elif result["suggestion"]:
                        validation_suggestions.append(f"⚠️ {field}: {result['suggestion']}")

            data["field_validations"] = field_validations
            data["validation_suggestions"] = validation_suggestions

            # Surface hard validation failures directly in the reply.
            if validation_issues:
                current_reply = data.get("ai_reply", "")
                data["ai_reply"] = current_reply + "\n\n" + "\n".join(validation_issues)
                logger.info("ℹ️ ML validation issues found", issues=validation_issues)

        except Exception as e:
            logger.warning(f"⚠️ Failed to validate fields: {e}")

    # ---- normalization --------------------------------------------------
    # FIX: normalize AFTER inference so the Nominatim-extracted city and
    # the ML-inferred listing_type are not discarded when writing state
    # below (previously these locals were computed from the raw LLM output
    # before inference mutated `data`).
    location = _normalize_locations(data.get("location"))
    amenities = _normalize_amenities(data.get("amenities", []))
    price_type = _normalize_price_type(data.get("price_type"))
    listing_type = _normalize_listing_type(data.get("listing_type"))

    intent_value = data.get("intent")

    # Update state with all fields.
    state.update(
        # FIX: persist the intent — graph routing reads state["intent"].
        intent=intent_value,
        allowed=data.get("allowed", False),
        status=data.get("status"),
        missing_fields=data.get("missing_fields", []),
        next_question=data.get("next_question"),

        # Listing fields
        listing_type=listing_type,
        location=location,
        bedrooms=data.get("bedrooms"),
        bathrooms=data.get("bathrooms"),
        price=data.get("price"),
        price_type=price_type,
        amenities=amenities,
        requirements=data.get("requirements"),

        # Search fields
        min_price=data.get("min_price"),
        max_price=data.get("max_price"),

        # ML fields
        field_validations=data.get("field_validations"),
        listing_confidence=data.get("listing_confidence"),
        currency_confidence=data.get("currency_confidence"),
        location_details=data.get("location_details"),
        validation_suggestions=data.get("validation_suggestions"),

        # Other
        currency=data.get("currency", "XOF"),
        ai_reply=data.get("ai_reply", ""),
        draft_preview=data.get("draft_preview"),
    )

    logger.info("📄 Intent node processed",
                intent=intent_value,
                status=state.get("status"),
                missing_fields=state.get("missing_fields"),
                location=state.get("location"))

    return state
app/ai/nodes/publish_node.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/ai/nodes/publish_node.py FINAL: only runs when user says "publish" + LOG insert
2
  import datetime
3
  from typing import Dict
4
  from app.database import get_db
@@ -8,37 +8,38 @@ from bson import ObjectId
8
 
9
  logger = get_logger(__name__)
10
 
 
11
  PUBLISH_WORDS = {"publish", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
12
 
13
- # ---------- helpers ----------
14
- def _get_current_message(state: Dict) -> str:
15
- """
16
- Extract ONLY the current user message from the prompt we built in service.py
17
- """
18
- full = state["messages"][-1]["content"]
19
- if "Now the user says:" in full:
20
- return full.split("Now the user says:")[-1].strip()
21
- return full.strip()
22
-
23
  # ---------- node ----------
24
  async def publish_node(state: Dict) -> Dict:
25
  """
26
- Save draft to MongoDB as PUBLISHED when user explicitly confirms.
 
 
 
27
  """
28
- if state.get("status") not in ["preview_shown", "waiting_for_images"] or not state.get("draft_preview"):
 
 
29
  return state
30
-
31
- current_msg = _get_current_message(state).lower()
32
- if not any(word in current_msg for word in PUBLISH_WORDS):
 
 
 
33
  state["ai_reply"] = "Waiting for your confirmation. Say **publish** when ready!"
34
  return state
35
-
 
36
  draft_preview = state["draft_preview"]
37
-
38
  try:
39
  db = await get_db()
40
  col = db["listings"]
41
-
 
42
  listing_data = {
43
  "owner_id": state["user_id"],
44
  "type": draft_preview.get("listing_type"),
@@ -52,45 +53,46 @@ async def publish_node(state: Dict) -> Dict:
52
  "amenities": draft_preview.get("amenities", []),
53
  "requirements": draft_preview.get("requirements"),
54
  "currency": draft_preview.get("currency", "XOF"),
55
- "images": draft_preview.get("images", []),
56
  "status": "published",
57
  "created_at": datetime.datetime.utcnow(),
58
  "updated_at": datetime.datetime.utcnow(),
59
- "views": 0,
60
- "favorites": 0,
61
  }
62
-
63
  result = await col.insert_one(listing_data)
64
  mongo_id = str(result.inserted_id)
65
-
66
- # ===== NEW: log success =====
67
- logger.info("✅ Listing INSERTED into MongoDB", mongo_id=mongo_id)
68
- # ===== end NEW =====
69
-
 
 
 
70
  image_info = ""
71
  if draft_preview.get("images"):
72
- image_info = f"\n🖼️ With {len(draft_preview['images'])} image(s)"
73
-
74
  state["ai_reply"] = f"""
75
- **Listing Live!**
76
 
77
  Your property is now published and visible to renters/buyers:
78
  - **Title:** {draft_preview.get('title')}
79
  - **Location:** {draft_preview.get('location')}
80
  - **Price:** {draft_preview.get('price'):,} {draft_preview.get('price_type')}{image_info}
81
 
82
- 🎉 Users can now find your listing when they search!
83
 
84
  Want to list another property? Just say "list a property"
85
  """
86
- state["intent"] = None
87
  state["status"] = "published"
88
  state["mongo_id"] = mongo_id
89
- logger.info("✅ Intent cleared and status set to published")
90
-
91
  except Exception as e:
92
- logger.error(" Publish failed", exc_info=e)
93
  state["ai_reply"] = "Sorry, I couldn't publish your listing. Please try again."
94
  state["status"] = "error"
95
-
96
  return state
 
1
+ # app/ai/nodes/publish_node.py - FIXED WITH IMAGES IN MONGODB
2
  import datetime
3
  from typing import Dict
4
  from app.database import get_db
 
8
 
9
  logger = get_logger(__name__)
10
 
11
+ # ---------- intent keywords ----------
12
  PUBLISH_WORDS = {"publish", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
13
 
 
 
 
 
 
 
 
 
 
 
14
  # ---------- node ----------
15
  async def publish_node(state: Dict) -> Dict:
16
  """
17
+ LangGraph node:
18
+ - If user says "publish" → save draft to MongoDB as PUBLISHED
19
+ - Set status to "published"
20
+ - Return success message with MongoDB ID
21
  """
22
+
23
+ # Only process if we have a draft preview waiting
24
+ if state.get("status") != "preview_shown" or not state.get("draft_preview"):
25
  return state
26
+
27
+ last_text = state["messages"][-1]["content"].lower()
28
+
29
+ # Check if user confirmed publish
30
+ if not any(word in last_text for word in PUBLISH_WORDS):
31
+ # User didn't confirm, stay in preview_shown state
32
  state["ai_reply"] = "Waiting for your confirmation. Say **publish** when ready!"
33
  return state
34
+
35
+ # User confirmed! Save to MongoDB
36
  draft_preview = state["draft_preview"]
37
+
38
  try:
39
  db = await get_db()
40
  col = db["listings"]
41
+
42
+ # ✅ Build listing data with all fields including images
43
  listing_data = {
44
  "owner_id": state["user_id"],
45
  "type": draft_preview.get("listing_type"),
 
53
  "amenities": draft_preview.get("amenities", []),
54
  "requirements": draft_preview.get("requirements"),
55
  "currency": draft_preview.get("currency", "XOF"),
56
+ "images": draft_preview.get("images", []), # ✅ INCLUDE IMAGES
57
  "status": "published",
58
  "created_at": datetime.datetime.utcnow(),
59
  "updated_at": datetime.datetime.utcnow(),
60
+ "views": 0, # Initialize view counter
61
+ "favorites": 0, # Initialize favorites counter
62
  }
63
+
64
  result = await col.insert_one(listing_data)
65
  mongo_id = str(result.inserted_id)
66
+
67
+ logger.info("✅ Listing published to MongoDB",
68
+ mongo_id=mongo_id,
69
+ title=draft_preview.get("title"),
70
+ location=draft_preview.get("location"),
71
+ image_count=len(draft_preview.get("images", [])))
72
+
73
+ # Build success message with image info
74
  image_info = ""
75
  if draft_preview.get("images"):
76
+ image_info = f"\n🖼️ With {len(draft_preview['images'])} image(s)"
77
+
78
  state["ai_reply"] = f"""
79
+ ✅ **Listing Live!**
80
 
81
  Your property is now published and visible to renters/buyers:
82
  - **Title:** {draft_preview.get('title')}
83
  - **Location:** {draft_preview.get('location')}
84
  - **Price:** {draft_preview.get('price'):,} {draft_preview.get('price_type')}{image_info}
85
 
86
+ 👉 Users can now find your listing when they search!
87
 
88
  Want to list another property? Just say "list a property"
89
  """
 
90
  state["status"] = "published"
91
  state["mongo_id"] = mongo_id
92
+
 
93
  except Exception as e:
94
+ logger.error("❌ Failed to publish listing", exc_info=e)
95
  state["ai_reply"] = "Sorry, I couldn't publish your listing. Please try again."
96
  state["status"] = "error"
97
+
98
  return state
app/ai/nodes/role_gate_node.py CHANGED
@@ -13,23 +13,23 @@ def role_gate_node(state: Dict) -> Dict:
13
  - input: state with allowed, listing_type, user_role, intent
14
  - output: same state (may flip allowed + set ai_reply)
15
  """
16
- # 1. search intent skip role check entirely
17
  if state.get("intent") == "search":
18
- logger.info(" Passed search intent")
19
  return state
20
 
21
  # 2. already blocked by intent node? keep message
22
  if not state.get("allowed", False):
23
- logger.info("🚫 Already denied", reason="intent")
24
  return state
25
 
26
- # 3. casual chat (no listing_type) leave reply untouched
27
  list_type = state.get("listing_type")
28
  if list_type is None:
29
- logger.info(" Passed casual chat")
30
  return state
31
 
32
- # 4. real-estate listing role check
33
  allowed_set = LANDLORD_ALLOWED if state["user_role"] == "landlord" else RENTER_ALLOWED
34
  if list_type not in allowed_set:
35
  state["allowed"] = False
@@ -43,9 +43,9 @@ def role_gate_node(state: Dict) -> Dict:
43
  "As a renter you can only list for roommate matching. "
44
  "If you want to share your own space, great! Otherwise you can search listings."
45
  )
46
- logger.info("🚫 Denied by role gate", user_role=state["user_role"], type=list_type)
47
  else:
48
  state["ai_reply"] = state.get("ai_reply") or "Got it! Let me draft that for you."
49
- logger.info(" Approved", user_role=state["user_role"], type=list_type)
50
 
51
  return state
 
13
  - input: state with allowed, listing_type, user_role, intent
14
  - output: same state (may flip allowed + set ai_reply)
15
  """
16
+ # 1. search intent → skip role check entirely
17
  if state.get("intent") == "search":
18
+ logger.info("Aida passed search intent")
19
  return state
20
 
21
  # 2. already blocked by intent node? keep message
22
  if not state.get("allowed", False):
23
+ logger.info("Aida already denied", reason="intent")
24
  return state
25
 
26
+ # 3. casual chat (no listing_type) → leave reply untouched
27
  list_type = state.get("listing_type")
28
  if list_type is None:
29
+ logger.info("Aida passed casual chat")
30
  return state
31
 
32
+ # 4. real-estate listing → role check
33
  allowed_set = LANDLORD_ALLOWED if state["user_role"] == "landlord" else RENTER_ALLOWED
34
  if list_type not in allowed_set:
35
  state["allowed"] = False
 
43
  "As a renter you can only list for roommate matching. "
44
  "If you want to share your own space, great! Otherwise you can search listings."
45
  )
46
+ logger.info("Aida denied by role gate", user_role=state["user_role"], type=list_type)
47
  else:
48
  state["ai_reply"] = state.get("ai_reply") or "Got it! Let me draft that for you."
49
+ logger.info("Aida approved", user_role=state["user_role"], type=list_type)
50
 
51
  return state
app/ai/nodes/search_node.py CHANGED
@@ -1,27 +1,19 @@
1
- # app/ai/nodes/search_node.py - FINAL: Complete error handling + retry logic + observability
2
- import logging
3
  import httpx
4
  from typing import Dict, List
5
  from qdrant_client import AsyncQdrantClient, models
6
- from tenacity import retry, stop_after_attempt, wait_exponential
7
-
8
  from app.config import settings
9
- from app.core.error_handling import (
10
- async_retry,
11
- RetryStrategy,
12
- trace_operation,
13
- VectorDBError,
14
- )
15
 
16
- logger = logging.getLogger(__name__)
17
 
18
  EMBED_MODEL = "qwen/qwen3-embedding-8b"
19
  TOP_K = 6
20
 
21
- # ============================================================
22
- # Qdrant Client
23
- # ============================================================
24
-
25
  qdrant_client = AsyncQdrantClient(
26
  url=settings.QDRANT_URL,
27
  api_key=settings.QDRANT_API_KEY,
@@ -29,26 +21,7 @@ qdrant_client = AsyncQdrantClient(
29
  timeout=60,
30
  )
31
 
32
- # ============================================================
33
- # Custom Exceptions
34
- # ============================================================
35
-
36
- class SearchError(Exception):
37
- """Base exception for search operations."""
38
- pass
39
-
40
- class EmbeddingError(SearchError):
41
- """Embedding generation error."""
42
- pass
43
-
44
- class QdrantSearchError(SearchError):
45
- """Qdrant search error."""
46
- pass
47
-
48
- # ============================================================
49
- # Helpers
50
- # ============================================================
51
-
52
  def _build_filter(state: Dict) -> models.Filter:
53
  """Build comprehensive Qdrant filter from ALL search fields."""
54
  must = []
@@ -63,7 +36,7 @@ def _build_filter(state: Dict) -> models.Filter:
63
  )
64
  )
65
 
66
- # Price range filters
67
  if state.get("min_price") is not None or state.get("max_price") is not None:
68
  price_range = {}
69
  if state.get("min_price") is not None:
@@ -97,7 +70,7 @@ def _build_filter(state: Dict) -> models.Filter:
97
  )
98
  )
99
 
100
- # Price type filter
101
  price_type = (state.get("price_type") or "").lower()
102
  if price_type:
103
  must.append(
@@ -107,7 +80,7 @@ def _build_filter(state: Dict) -> models.Filter:
107
  )
108
  )
109
 
110
- # Listing type filter
111
  listing_type = (state.get("listing_type") or "").lower()
112
  if listing_type:
113
  must.append(
@@ -117,7 +90,7 @@ def _build_filter(state: Dict) -> models.Filter:
117
  )
118
  )
119
 
120
- # Amenities filter
121
  amenities = state.get("amenities", [])
122
  if amenities:
123
  for amenity in amenities:
@@ -131,94 +104,46 @@ def _build_filter(state: Dict) -> models.Filter:
131
  )
132
 
133
  filt = models.Filter(must=must) if must else models.Filter()
134
- logger.info(
135
- "🔍 Filter built",
136
- extra={
137
- "conditions": len(must),
138
- "location": loc,
139
- "price_range": f"{state.get('min_price')}-{state.get('max_price')}",
140
- "bedrooms": state.get("bedrooms"),
141
- "amenities": len(amenities),
142
- }
143
- )
144
  return filt
145
 
146
- @async_retry(strategy=RetryStrategy.MODERATE, operation_name="embedding_generation")
147
  async def _embed(text: str) -> List[float]:
148
- """
149
- Generate embedding with retry logic and error handling.
150
-
151
- Raises:
152
- EmbeddingError: If embedding generation fails
153
- """
154
-
155
- with trace_operation("embedding_generation", {"text_length": len(text)}):
156
- if not text or not text.strip():
157
- logger.error(" Empty text provided for embedding")
158
- raise EmbeddingError("Empty text provided for embedding")
159
-
160
- payload = {
161
- "model": EMBED_MODEL,
162
- "input": text,
163
- "encoding_format": "float",
164
- }
165
- headers = {
166
- "Authorization": f"Bearer {settings.OPENROUTER_API_KEY}",
167
- "Content-Type": "application/json",
168
- "HTTP-Referer": "",
169
- "X-Title": "",
170
- }
171
-
172
- try:
173
- async with httpx.AsyncClient(timeout=60) as client:
174
- logger.info("🤖 Calling embedding API")
175
- resp = await client.post(
176
- "https://openrouter.ai/api/v1/embeddings",
177
- headers=headers,
178
- json=payload,
179
- timeout=60,
180
- )
181
- resp.raise_for_status()
182
-
183
- data = resp.json()
184
- if not data.get("data"):
185
- logger.error("❌ Empty embedding response")
186
- raise EmbeddingError("Empty embedding response from API")
187
-
188
- embedding = data["data"][0]["embedding"]
189
- logger.info(f"✅ Embedding generated, dimension={len(embedding)}")
190
- return embedding
191
-
192
- except httpx.HTTPError as e:
193
- logger.error(f"❌ Embedding API HTTP error: {e}", exc_info=True)
194
- raise EmbeddingError(f"HTTP error calling embedding API: {e}")
195
- except KeyError as e:
196
- logger.error(f"❌ Embedding response malformed: {e}", exc_info=True)
197
- raise EmbeddingError(f"Malformed embedding response: {e}")
198
- except Exception as e:
199
- logger.error(f"❌ Unexpected embedding error: {e}", exc_info=True)
200
- raise EmbeddingError(f"Unexpected error generating embedding: {e}")
201
 
202
- @async_retry(strategy=RetryStrategy.MODERATE, operation_name="qdrant_search")
203
  async def _search_with_must(must: List, vector: List[float]):
204
- """Execute Qdrant search with retry logic."""
205
-
206
- with trace_operation("qdrant_search_execution", {"filter_count": len(must)}):
207
- try:
208
- logger.info("🔎 Executing Qdrant search")
209
- hits = await qdrant_client.search(
210
- collection_name="listings",
211
- query_vector=vector,
212
- query_filter=models.Filter(must=must),
213
- limit=TOP_K,
214
- with_payload=True,
215
- )
216
- logger.info(f"✅ Qdrant search returned {len(hits)} results")
217
- return hits
218
-
219
- except Exception as e:
220
- logger.error(f"❌ Qdrant search failed: {e}", exc_info=True)
221
- raise QdrantSearchError(f"Qdrant search failed: {e}")
222
 
223
  def _add_price_range(must: List, state: Dict):
224
  """Add combined price range filter."""
@@ -230,225 +155,143 @@ def _add_price_range(must: List, state: Dict):
230
  price_range["lte"] = state["max_price"]
231
 
232
  if price_range:
233
- must.append(
234
- models.FieldCondition(
235
- key="price",
236
- range=models.Range(**price_range)
237
- )
238
- )
239
 
240
- def _hits_to_cards(hits) -> List[dict]:
241
  """Convert Qdrant hits to UI cards."""
242
- cards = []
243
- for hit in hits:
244
- try:
245
- card = {
246
- "id": hit.id,
247
- "title": (
248
- hit.payload.get("title") or
249
- f"{hit.payload.get('bedrooms', '')}-bed {hit.payload.get('location', '')}"
250
- ),
251
- "location": hit.payload.get("location"),
252
- "price": hit.payload.get("price"),
253
- "price_type": hit.payload.get("price_type"),
254
- "bedrooms": hit.payload.get("bedrooms"),
255
- "bathrooms": hit.payload.get("bathrooms"),
256
- "amenities": hit.payload.get("amenities", []),
257
- "description": hit.payload.get("description"),
258
- "listing_type": hit.payload.get("listing_type"),
259
- "images": hit.payload.get("images", []),
260
- "currency": hit.payload.get("currency", "XOF"),
261
- }
262
- cards.append(card)
263
- except Exception as e:
264
- logger.warning(f"⚠️ Failed to convert hit to card: {e}")
265
- continue
266
-
267
- return cards
268
 
269
  async def _suggest_relaxed(state: Dict, vector: List[float]) -> List[dict]:
270
  """
271
  Loosen constraints progressively while keeping location strict.
272
- Returns relaxed search results if exact match not found.
273
  """
274
-
275
- with trace_operation("suggest_relaxed", {"location": state.get("location")}):
276
- loc = (state.get("location") or "").lower()
277
- br = state.get("bedrooms")
278
- amenities = state.get("amenities", [])
279
 
280
- # If no location specified, return empty
281
- if not loc:
282
- logger.warning("⚠️ No location specified for suggestion")
283
- return []
284
 
285
- # Location is ALWAYS a hard constraint
286
- location_filter = models.FieldCondition(
287
- key="location_lower",
288
- match=models.MatchValue(value=loc)
289
- )
290
 
291
- # Try progressively looser constraints
292
-
293
- # 1. All filters
294
- try:
295
- logger.info("🔄 Trying search with all filters")
296
- must = [location_filter]
297
- if br is not None:
298
- must.append(
299
- models.FieldCondition(
300
- key="bedrooms",
301
- match=models.MatchValue(value=br)
302
- )
303
- )
304
- _add_price_range(must, state)
305
- for amenity in amenities:
306
- must.append(
307
- models.FieldCondition(
308
- key="amenities",
309
- match=models.MatchValue(value=amenity.lower())
310
- )
311
- )
312
- hits = await _search_with_must(must, vector)
313
- if hits:
314
- logger.info("✅ Found results with all filters")
315
- return _hits_to_cards(hits)
316
- except Exception as e:
317
- logger.warning(f"⚠️ Search with all filters failed: {e}")
318
 
319
- # 2. No amenities
320
- try:
321
- logger.info("🔄 Trying search without amenities")
322
- must = [location_filter]
323
- if br is not None:
324
- must.append(
325
- models.FieldCondition(
326
- key="bedrooms",
327
- match=models.MatchValue(value=br)
328
- )
329
- )
330
- _add_price_range(must, state)
331
- hits = await _search_with_must(must, vector)
332
- if hits:
333
- logger.info("✅ Found results without amenities")
334
- return _hits_to_cards(hits)
335
- except Exception as e:
336
- logger.warning(f"⚠️ Search without amenities failed: {e}")
337
 
338
- # 3. Adjust bedrooms ±1
339
- if br is not None:
340
- try:
341
- logger.info("🔄 Trying search with adjusted bedrooms")
342
- must = [location_filter]
343
- new_br = br - 1 if br > 1 else br + 1
344
- must.append(
345
- models.FieldCondition(
346
- key="bedrooms",
347
- match=models.MatchValue(value=new_br)
348
- )
349
- )
350
- _add_price_range(must, state)
351
- hits = await _search_with_must(must, vector)
352
- if hits:
353
- logger.info(f"✅ Found results with {new_br} bedrooms")
354
- return _hits_to_cards(hits)
355
- except Exception as e:
356
- logger.warning(f"⚠️ Search with adjusted bedrooms failed: {e}")
357
 
358
- # 4. Relax price +25%
359
- try:
360
- logger.info("🔄 Trying search with relaxed price")
361
- must = [location_filter]
362
- if br is not None:
363
- must.append(
364
- models.FieldCondition(
365
- key="bedrooms",
366
- match=models.MatchValue(value=br)
367
- )
368
- )
369
- if state.get("max_price") is not None:
370
- relaxed_max = int(state["max_price"] * 1.25)
371
- must.append(
372
- models.FieldCondition(
373
- key="price",
374
- range=models.Range(lte=relaxed_max)
375
- )
376
- )
377
- else:
378
- _add_price_range(must, state)
379
- hits = await _search_with_must(must, vector)
380
- if hits:
381
- logger.info("✅ Found results with relaxed price")
382
- return _hits_to_cards(hits)
383
- except Exception as e:
384
- logger.warning(f"⚠️ Search with relaxed price failed: {e}")
385
 
386
- logger.warning("⚠️ No results found even with relaxed criteria")
387
- return []
388
 
389
- # ============================================================
390
- # Search Node
391
- # ============================================================
392
 
 
393
  async def search_node(state: Dict) -> Dict:
394
  """
395
- LangGraph node: Comprehensive search with all filters
 
 
 
396
 
397
- Features:
398
- - Semantic search via embeddings
399
- - Keyword filters (location, price, bedrooms, etc.)
400
- - Error handling with graceful fallback
401
- - Suggestion system for zero-hit scenarios
402
- - UI-ready response format
403
  """
 
 
 
 
 
404
 
405
- with trace_operation(
406
- "search_node",
407
- {
408
- "location": state.get("location"),
409
- "bedrooms": state.get("bedrooms"),
410
- "min_price": state.get("min_price"),
411
- "max_price": state.get("max_price"),
412
- }
413
- ):
414
- query = state.get("search_query", "") or state["messages"][-1]["content"]
415
- logger.info(f"🔍 Starting search for: {query[:100]}")
416
-
417
- # ===== Generate embedding =====
418
- try:
419
- with trace_operation("embed_query"):
420
- vector = await _embed(query)
421
- logger.info(" Query embedded successfully")
422
- except EmbeddingError as e:
423
- logger.error(f"❌ Embedding failed: {e}")
424
- state["ai_reply"] = "Sorry, I couldn't process your search right now. Please try again."
425
- state["search_preview"] = {
426
- "type": "search_results",
427
- "count": 0,
428
- "query": query,
429
- "filters": {},
430
- "results": [],
431
- "message": "Search temporarily unavailable",
432
- "error": "embedding_failed",
433
- }
434
- state["search_results"] = []
435
- return state
436
-
437
- # ===== Build filter =====
438
- filt = _build_filter(state)
439
-
440
- # ===== Execute search =====
441
- try:
442
- with trace_operation("execute_search"):
443
- logger.info("🔎 Executing search")
444
- hits = await _search_with_must(filt.must if filt.must else [], vector)
445
- logger.info(f"✅ Search returned {len(hits)} results")
446
- except QdrantSearchError as e:
447
- logger.error(f"❌ Search failed: {e}")
448
- state["ai_reply"] = "Sorry, I'm having trouble searching right now. Please try again."
449
- state["search_preview"] = {
450
  "type": "search_results",
451
- "count": 0,
452
  "query": query,
453
  "filters": {
454
  "location": state.get("location"),
@@ -456,77 +299,22 @@ async def search_node(state: Dict) -> Dict:
456
  "max_price": state.get("max_price"),
457
  "bedrooms": state.get("bedrooms"),
458
  "bathrooms": state.get("bathrooms"),
 
 
459
  "amenities": state.get("amenities", []),
460
  },
461
- "results": [],
462
- "message": "Search service temporarily unavailable",
463
- "error": "search_failed",
464
  }
465
- state["search_results"] = []
466
- return state
467
-
468
- cards = _hits_to_cards(hits)
469
-
470
- # ===== Handle zero-hit scenario =====
471
- if not cards:
472
- logger.info("ℹ️ No exact matches found, generating suggestions")
473
- location = state.get("location") or "that area"
474
-
475
- try:
476
- suggestions = await _suggest_relaxed(state, vector)
477
- except Exception as e:
478
- logger.warning(f"⚠️ Failed to generate suggestions: {e}")
479
- suggestions = []
480
-
481
- if suggestions:
482
- state["ai_reply"] = (
483
- f"I found no exact match for your request, "
484
- f"but you might like these similar options:"
485
- )
486
- state["search_preview"] = {
487
- "type": "search_results",
488
- "count": len(suggestions),
489
- "query": query,
490
- "filters": {
491
- "location": state.get("location"),
492
- "min_price": state.get("min_price"),
493
- "max_price": state.get("max_price"),
494
- "bedrooms": state.get("bedrooms"),
495
- "bathrooms": state.get("bathrooms"),
496
- "price_type": state.get("price_type"),
497
- "listing_type": state.get("listing_type"),
498
- "amenities": state.get("amenities", []),
499
- },
500
- "results": suggestions,
501
- "message": "Similar options available",
502
- }
503
- state["search_results"] = suggestions
504
- else:
505
- state["ai_reply"] = f"I found no property in {location}. Try widening your search or check back later!"
506
- state["search_preview"] = {
507
- "type": "search_results",
508
- "count": 0,
509
- "query": query,
510
- "filters": {
511
- "location": state.get("location"),
512
- "min_price": state.get("min_price"),
513
- "max_price": state.get("max_price"),
514
- "bedrooms": state.get("bedrooms"),
515
- "bathrooms": state.get("bathrooms"),
516
- "price_type": state.get("price_type"),
517
- "listing_type": state.get("listing_type"),
518
- "amenities": state.get("amenities", []),
519
- },
520
- "results": [],
521
- "message": "No results found",
522
- }
523
- state["search_results"] = []
524
  else:
525
- logger.info(f" Found {len(cards)} results")
526
- state["ai_reply"] = f"Here are {len(cards)} places I found for you:"
527
- state["search_preview"] = {
 
 
528
  "type": "search_results",
529
- "count": len(cards),
530
  "query": query,
531
  "filters": {
532
  "location": state.get("location"),
@@ -538,10 +326,30 @@ async def search_node(state: Dict) -> Dict:
538
  "listing_type": state.get("listing_type"),
539
  "amenities": state.get("amenities", []),
540
  },
541
- "results": cards,
542
- "message": f"Found {len(cards)} listings",
543
  }
544
- state["search_results"] = cards
545
-
546
- logger.info("✅ Search node completed")
547
- return state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app/ai/nodes/search_node.py - Return UI-ready search results
2
+ import json
3
  import httpx
4
  from typing import Dict, List
5
  from qdrant_client import AsyncQdrantClient, models
 
 
6
  from app.config import settings
7
+ from structlog import get_logger
 
 
 
 
 
8
 
9
+ logger = get_logger(__name__)
10
 
11
  EMBED_MODEL = "qwen/qwen3-embedding-8b"
12
  TOP_K = 6
13
 
14
+ # ------------------------------------------------------------------
15
+ # Qdrant client
16
+ # ------------------------------------------------------------------
 
17
  qdrant_client = AsyncQdrantClient(
18
  url=settings.QDRANT_URL,
19
  api_key=settings.QDRANT_API_KEY,
 
21
  timeout=60,
22
  )
23
 
24
+ # ---------- helpers ----------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  def _build_filter(state: Dict) -> models.Filter:
26
  """Build comprehensive Qdrant filter from ALL search fields."""
27
  must = []
 
36
  )
37
  )
38
 
39
+ # Price range filters (combine into single condition)
40
  if state.get("min_price") is not None or state.get("max_price") is not None:
41
  price_range = {}
42
  if state.get("min_price") is not None:
 
70
  )
71
  )
72
 
73
+ # Price type filter (monthly, nightly, yearly, etc.)
74
  price_type = (state.get("price_type") or "").lower()
75
  if price_type:
76
  must.append(
 
80
  )
81
  )
82
 
83
+ # Listing type filter (rent, short_stay, roommate, sale)
84
  listing_type = (state.get("listing_type") or "").lower()
85
  if listing_type:
86
  must.append(
 
90
  )
91
  )
92
 
93
+ # Amenities filter (all mentioned amenities must exist in listing)
94
  amenities = state.get("amenities", [])
95
  if amenities:
96
  for amenity in amenities:
 
104
  )
105
 
106
  filt = models.Filter(must=must) if must else models.Filter()
107
+ logger.info("🔍 Filter built", must_conditions=len(must), location=loc,
108
+ min_price=state.get("min_price"), max_price=state.get("max_price"),
109
+ bedrooms=state.get("bedrooms"), bathrooms=state.get("bathrooms"),
110
+ amenities=amenities, price_type=price_type, listing_type=listing_type)
 
 
 
 
 
 
111
  return filt
112
 
113
+
114
  async def _embed(text: str) -> List[float]:
115
+ """Call OpenRouter embedding endpoint (async)."""
116
+ payload = {
117
+ "model": EMBED_MODEL,
118
+ "input": text,
119
+ "encoding_format": "float",
120
+ }
121
+ headers = {
122
+ "Authorization": f"Bearer {settings.OPENROUTER_API_KEY}",
123
+ "Content-Type": "application/json",
124
+ "HTTP-Referer": "",
125
+ "X-Title": "",
126
+ }
127
+ async with httpx.AsyncClient(timeout=60) as client:
128
+ resp = await client.post(
129
+ "https://openrouter.ai/api/v1/embeddings",
130
+ headers=headers,
131
+ json=payload,
132
+ )
133
+ resp.raise_for_status()
134
+ return resp.json()["data"][0]["embedding"]
135
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
 
137
+ # ---------- suggestion helpers ----------
138
  async def _search_with_must(must: List, vector: List[float]):
139
+ return await qdrant_client.search(
140
+ collection_name="listings",
141
+ query_vector=vector,
142
+ query_filter=models.Filter(must=must),
143
+ limit=TOP_K,
144
+ with_payload=True,
145
+ )
146
+
 
 
 
 
 
 
 
 
 
 
147
 
148
  def _add_price_range(must: List, state: Dict):
149
  """Add combined price range filter."""
 
155
  price_range["lte"] = state["max_price"]
156
 
157
  if price_range:
158
+ must.append(models.FieldCondition(key="price", range=models.Range(**price_range)))
159
+
 
 
 
 
160
 
161
+ def _hits_to_cards(hits):
162
  """Convert Qdrant hits to UI cards."""
163
+ return [
164
+ {
165
+ "id": hit.id,
166
+ "title": hit.payload.get("title") or f"{hit.payload.get('bedrooms', '')}-bed {hit.payload.get('location', '')}",
167
+ "location": hit.payload.get("location"),
168
+ "price": hit.payload.get("price"),
169
+ "price_type": hit.payload.get("price_type"),
170
+ "bedrooms": hit.payload.get("bedrooms"),
171
+ "bathrooms": hit.payload.get("bathrooms"),
172
+ "amenities": hit.payload.get("amenities", []),
173
+ "description": hit.payload.get("description"),
174
+ "listing_type": hit.payload.get("listing_type"),
175
+ "images": hit.payload.get("images", []), # ✅ Include images for UI
176
+ "currency": hit.payload.get("currency", "XOF"),
177
+ }
178
+ for hit in hits
179
+ ]
180
+
 
 
 
 
 
 
 
 
181
 
182
async def _suggest_relaxed(state: Dict, vector: List[float]) -> List[dict]:
    """
    Loosen constraints progressively while keeping location strict.
    If no location specified, return empty.
    """
    loc = (state.get("location") or "").lower()
    if not loc:
        # Location is the one constraint we never relax; nothing to anchor on.
        return []

    br = state.get("bedrooms")
    amenities = state.get("amenities", [])

    # Location is ALWAYS a hard constraint.
    location_filter = models.FieldCondition(
        key="location_lower",
        match=models.MatchValue(value=loc),
    )

    def bedrooms_cond(count):
        # Exact-match bedroom condition, shared across relaxation steps.
        return models.FieldCondition(key="bedrooms", match=models.MatchValue(value=count))

    # 1. All filters intact.
    must = [location_filter]
    if br is not None:
        must.append(bedrooms_cond(br))
    _add_price_range(must, state)
    must.extend(
        models.FieldCondition(key="amenities", match=models.MatchValue(value=amenity.lower()))
        for amenity in amenities
    )
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 2. Drop amenities, keep bedrooms and price.
    must = [location_filter]
    if br is not None:
        must.append(bedrooms_cond(br))
    _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 3. Shift bedrooms by one (down when possible, otherwise up).
    if br is not None:
        adjusted_br = br - 1 if br > 1 else br + 1
        must = [location_filter, bedrooms_cond(adjusted_br)]
        _add_price_range(must, state)
        hits = await _search_with_must(must, vector)
        if hits:
            return _hits_to_cards(hits)

    # 4. Relax the price ceiling by 25%, location still strict.
    must = [location_filter]
    if br is not None:
        must.append(bedrooms_cond(br))
    if state.get("max_price") is not None:
        relaxed_max = int(state["max_price"] * 1.25)
        must.append(models.FieldCondition(key="price", range=models.Range(lte=relaxed_max)))
    else:
        _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # Nothing matched even after every relaxation step.
    return []
 
245
 
 
 
 
246
 
247
+ # ---------- node ----------
248
  async def search_node(state: Dict) -> Dict:
249
  """
250
+ LangGraph node: comprehensive search with all filters
251
+ - title/description (semantic via embedding)
252
+ - location, price range, bedrooms, bathrooms, amenities (keyword filters)
253
+ - price_type, listing_type
254
 
255
+ ✅ RETURNS: search_results + search_preview (UI-ready)
 
 
 
 
 
256
  """
257
+ query = state.get("search_query", "") or state["messages"][-1]["content"]
258
+ vector = await _embed(query)
259
+ filt = _build_filter(state)
260
+
261
+ logger.info("🔍 Searching Qdrant", query=query, filter=str(filt))
262
 
263
+ hits = await qdrant_client.search(
264
+ collection_name="listings",
265
+ query_vector=vector,
266
+ query_filter=filt,
267
+ limit=TOP_K,
268
+ with_payload=True,
269
+ )
270
+
271
+ logger.info("📊 Qdrant search result", hits_count=len(hits))
272
+ cards = _hits_to_cards(hits)
273
+
274
+ # --- personalize zero-hit reply + suggestions + UI preview
275
+ if not cards:
276
+ location = state.get("location") or "that area"
277
+ bedrooms = state.get("bedrooms")
278
+ price_bit = (
279
+ " in your price range"
280
+ if state.get("min_price") is not None or state.get("max_price") is not None
281
+ else ""
282
+ )
283
+ br_bit = f" with {bedrooms} bedrooms" if bedrooms else ""
284
+ amenities_bit = f" with {', '.join(state.get('amenities', []))}" if state.get("amenities") else ""
285
+
286
+ suggestions = await _suggest_relaxed(state, vector)
287
+ if suggestions:
288
+ state["ai_reply"] = (
289
+ f"I found no exact match for your request, "
290
+ f"but you might like these similar options:"
291
+ )
292
+ state["search_preview"] = { # ✅ NEW: UI-ready preview for suggestions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
  "type": "search_results",
294
+ "count": len(suggestions),
295
  "query": query,
296
  "filters": {
297
  "location": state.get("location"),
 
299
  "max_price": state.get("max_price"),
300
  "bedrooms": state.get("bedrooms"),
301
  "bathrooms": state.get("bathrooms"),
302
+ "price_type": state.get("price_type"),
303
+ "listing_type": state.get("listing_type"),
304
  "amenities": state.get("amenities", []),
305
  },
306
+ "results": suggestions,
307
+ "message": "Similar options available"
 
308
  }
309
+ state["search_results"] = suggestions
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
  else:
311
+ state["ai_reply"] = (
312
+ f"I found no property in {location}{price_bit}{br_bit}{amenities_bit}. "
313
+ "Try widening your search or check back later!"
314
+ )
315
+ state["search_preview"] = { # ✅ Empty UI state
316
  "type": "search_results",
317
+ "count": 0,
318
  "query": query,
319
  "filters": {
320
  "location": state.get("location"),
 
326
  "listing_type": state.get("listing_type"),
327
  "amenities": state.get("amenities", []),
328
  },
329
+ "results": [],
330
+ "message": "No results found"
331
  }
332
+ else:
333
+ state["ai_reply"] = f"Here are {len(cards)} places I found for you:"
334
+ # ✅ NEW: Return UI-ready search preview
335
+ state["search_preview"] = {
336
+ "type": "search_results",
337
+ "count": len(cards),
338
+ "query": query,
339
+ "filters": {
340
+ "location": state.get("location"),
341
+ "min_price": state.get("min_price"),
342
+ "max_price": state.get("max_price"),
343
+ "bedrooms": state.get("bedrooms"),
344
+ "bathrooms": state.get("bathrooms"),
345
+ "price_type": state.get("price_type"),
346
+ "listing_type": state.get("listing_type"),
347
+ "amenities": state.get("amenities", []),
348
+ },
349
+ "results": cards,
350
+ "message": f"Found {len(cards)} listings"
351
+ }
352
+ state["search_results"] = cards
353
+
354
+ logger.info("Aida search finished", query=query, count=len(cards), suggestions=len(state.get("suggestions", [])))
355
+ return state
app/ai/prompts/system_prompt.txt CHANGED
@@ -1,89 +1,76 @@
1
- You are Aida, Lojiz's friendly real-estate AI assistant.
2
- User role: {user_role} (landlord | renter | airbnb_host | roommate_seeker | seller | buyer)
3
 
4
  ------------------------------------------------------
5
- BRAND IDENTITY
6
  ------------------------------------------------------
7
 
8
- WHO YOU ARE:
9
  - You are AIDA, Lojiz's proprietary real-estate AI assistant
10
  - Do NOT mention DeepSeek, GPT, or any other LLM names
11
  - You are a standalone AI developed specifically for Lojiz
12
  - Never refer to yourself as based on other AI models
13
 
14
- IF ASKED "WHO ARE YOU?":
15
- "I'm Aida, Lojiz's AI real-estate assistant. I help landlords list properties, renters find homes, and more!"
16
 
17
- IF ASKED "WHO DESIGNED YOU?":
18
  "I was designed and developed by the Lojiz team. Our talented team includes:
19
- - Emeka Destiny Ebuka - Lead AI & ML Engineer/Developer
20
- - Uche Samuel Chukwuemeka - Product Designer
 
21
  Together, they created me to make real-estate transactions easier and more accessible for everyone."
22
 
23
- IF ASKED "ARE YOU AN LLM?":
24
- "I'm Aida, Lojiz's specialized AI assistant. I'm not just a general-purpose AI - I'm specifically built to understand real-estate, help with property listings, and guide users through the Lojiz platform."
25
 
26
- NEVER SAY:
27
- - "I'm based on DeepSeek"
28
- - "I'm powered by GPT"
29
- - "I'm an LLM (Language Model)"
30
 
31
- ------------------------------------------------------
32
- PERSONALITY
33
- ------------------------------------------------------
34
- - Speak like a human, short and warm
35
- - IGNORE typos and grammar mistakes. Understand intent anyway
36
- - If user talks off-topic, reply casually and ask: "Would you like to list a property or search for one?"
37
- - Only switch to JSON when property details are mentioned
38
- - Be proud of being Aida - make it clear you're specialized in real-estate
39
 
40
  ------------------------------------------------------
41
- ROLE-BASED BEHAVIOR
42
  ------------------------------------------------------
43
-
44
- Detect and adapt to different user roles:
45
-
46
- LANDLORD:
47
- - Help list properties for rent or sale
48
- - Focus on: location, price, amenities, tenant requirements
49
- - Price types: monthly, yearly
50
- - Listing types: rent, short-stay, sale
51
-
52
- RENTER/TENANT:
53
- - Help search for rental properties
54
- - Focus on: budget, location, amenities needed
55
- - Can also list rooms for roommate matching
56
- - Listing types: roommate
57
-
58
- AIRBNB HOST:
59
- - Help list short-stay vacation properties
60
- - Focus on: guest amenities, location convenience
61
- - Price types: nightly, daily, weekly
62
- - Listing type: short-stay
63
-
64
- ROOMMATE SEEKER:
65
- - Help find rooms to share or advertise spare rooms
66
- - Focus on: compatibility, house rules, utilities included
67
- - Price types: monthly, yearly
68
- - Listing type: roommate
69
-
70
- SELLER (Property Sale):
71
- - Help list properties for sale
72
- - Focus on: property condition, location, price
73
- - Listing type: sale
74
-
75
- BUYER:
76
- - Help search for properties to purchase
77
- - Focus on: budget range, location, property type
78
 
79
  ------------------------------------------------------
80
- PRESERVE USER CORRECTIONS
81
  ------------------------------------------------------
82
- IMPORTANT: If a user corrects ANY field (location, bedrooms, price, etc.), PRESERVE that correction
83
  - Do NOT regenerate or revert corrections when showing previews
84
  - Example: If user says "actually it's in Calavi not Cotonou", update state AND keep it that way
85
- - When showing draft preview, always use the LATEST corrected values
86
- - If a user requests changes, apply them WITHOUT reverting previous corrections
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
88
  ------------------------------------------------------
89
  LISTING CREATION - PROGRESSIVE COLLECTION
@@ -104,26 +91,27 @@ OPTIONAL FIELDS (Ask, but not required):
104
  AUTO-GENERATED:
105
  - title (AI generates from location, bedrooms, listing_type)
106
  - description (AI generates professional description)
107
- - currency (auto-detect from location: Lagos=NGN, Cotonou=XOF, etc.)
 
108
 
109
  LOCATION EXTRACTION:
110
  - Extract ONLY the city/area name
111
  - Ignore long descriptions
112
  - Examples:
113
- "calavi quartier zogbadje" -> location: "calavi"
114
- "VI in Lagos" -> location: "lagos"
115
- "Lekki, Lagos" -> location: "lagos"
116
 
117
  LISTING TYPE AUTO-DETECTION:
118
- - "for rent" / "monthly" / "yearly" -> rent
119
- - "short stay" / "nightly" / "daily" / "weekly" -> short-stay
120
- - "for sale" / "selling" -> sale
121
- - "roommate" / "sharing" / "flatmate" -> roommate
122
 
123
  PRICE TYPE AUTO-DETECTION:
124
- - "monthly" / "month" / "per month" / "mth" -> monthly
125
- - "nightly" / "night" / "per night" / "daily" / "day" -> nightly
126
- - "yearly" / "year" / "per year" / "annum" -> yearly
127
 
128
  PROGRESSIVE COLLECTION FLOW:
129
  1. User provides initial info (may be incomplete)
@@ -132,10 +120,10 @@ PROGRESSIVE COLLECTION FLOW:
132
  4. Ask for missing fields ONE AT A TIME
133
  5. User provides each field (or corrects previous ones)
134
  6. Append/update to existing fields
135
- 7. When ALL required fields complete -> Ask for optional fields
136
- 8. When amenities/requirements collected -> Generate DRAFT
137
  9. Show DRAFT preview to user
138
- 10. User can request changes - PRESERVE all corrections
139
  11. User reviews and says "publish" to confirm
140
 
141
  ------------------------------------------------------
@@ -147,7 +135,7 @@ When user starts listing a property:
147
  2. Check for missing REQUIRED fields
148
  3. Ask missing fields one by one
149
  4. Build up state progressively
150
- 5. PRESERVE all corrections and changes
151
 
152
  Response format while collecting:
153
  {
@@ -162,7 +150,7 @@ Response format while collecting:
162
  "requirements": null,
163
  "status": "collecting",
164
  "missing_fields": ["amenities", "requirements"],
165
- "next_question": "Any amenities? (e.g., wifi, parking, balcony, pool, furnished, kitchen)",
166
  "ai_reply": "Great! I have: 2-bed in Lagos, 50k/month. Any amenities?"
167
  }
168
 
@@ -183,8 +171,72 @@ When ALL required fields complete:
183
  "draft_preview": null
184
  }
185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  ------------------------------------------------------
187
- SEARCH INTENT
188
  ------------------------------------------------------
189
  When user searches, extract and COMBINE ALL mentioned filters:
190
  - location: city/area (handle typos)
@@ -219,13 +271,11 @@ CRITICAL RULES
219
  4. For listing creation: collect required fields progressively
220
  5. For search: extract all filters at once
221
  6. Return ONLY JSON (no markdown, no preamble)
222
- 7. When all required fields complete -> ask for optional fields ONLY ONCE
223
- 8. When optional fields provided -> show draft preview
224
  9. Wait for "publish" command to save listing
225
- 10. PRESERVE user corrections - never revert them
226
- 11. BRAND RULE: Always refer to yourself as AIDA, never mention other LLMs
227
- 12. BRAND RULE: Give credit to Lojiz team when asked about your creators
228
- 13. BRAND RULE: Stand your ground as a specialized real-estate AI
229
- 14. ROLE RULE: Detect and adapt to user role
230
- 15. ROLE RULE: Adjust required fields based on user role
231
- 16. ROLE RULE: Use role-specific language and focus areas
 
1
+ You are Aida, Lojiz's friendly real-estate AI assistant.
2
+ User role: {user_role} (landlord | renter)
3
 
4
  ------------------------------------------------------
5
+ BRAND IDENTITY & RESPONSES
6
  ------------------------------------------------------
7
 
8
+ **WHO YOU ARE:**
9
  - You are AIDA, Lojiz's proprietary real-estate AI assistant
10
  - Do NOT mention DeepSeek, GPT, or any other LLM names
11
  - You are a standalone AI developed specifically for Lojiz
12
  - Never refer to yourself as based on other AI models
13
 
14
+ **IF ASKED "WHO ARE YOU?"**
15
+ "I'm Aida, Lojiz's AI real-estate assistant. I help landlords list properties and renters find their perfect home!"
16
 
17
+ **IF ASKED "WHO DESIGNED YOU?" OR "WHO CREATED YOU?"**
18
  "I was designed and developed by the Lojiz team. Our talented team includes:
19
+ - **Emeka Destiny Ebuka** - Lead AI & ML Engineer/Developer - designed my intelligence and learning capabilities
20
+ - **Uche Samuel Chukwuemeka** - Product Designer - shaped my user experience and personality
21
+
22
  Together, they created me to make real-estate transactions easier and more accessible for everyone."
23
 
24
+ **IF ASKED "WHO BUILT YOU?" OR "WHAT COMPANY MADE YOU?"**
25
+ "I'm built by Lojiz, a real-estate platform designed to connect landlords and renters seamlessly across Africa. The Lojiz team created me with passion and expertise in AI and real-estate technology."
26
 
27
+ **IF ASKED "ARE YOU AN LLM?" OR "WHAT MODEL ARE YOU?"**
28
+ "I'm Aida, Lojiz's specialized AI assistant. I'm not just a general-purpose AI - I'm specifically built to understand real-estate, help with property listings, and guide users through the Lojiz platform. That's what makes me different!"
 
 
29
 
30
+ **NEVER SAY:**
31
+ - ❌ "I'm based on DeepSeek"
32
+ - ❌ "I'm powered by GPT"
33
+ - ❌ "I'm an LLM (Language Model)"
34
+ - ❌ "I was trained on general data"
35
+ - ✅ Say "I'm Aida, Lojiz's AI assistant" instead
 
 
36
 
37
  ------------------------------------------------------
38
+ PERSONALITY & GRAMMAR TOLERANCE
39
  ------------------------------------------------------
40
+ - Speak like a human, short and warm.
41
+ - IGNORE typos, grammar mistakes, and spelling errors. Understand intent anyway.
42
+ - If the user greets or talks off-topic, reply casually and ask: "Would you like to list a property or search for one?"
43
+ - Only switch to JSON output when property details are mentioned OR when user clearly wants to SEARCH / SEE / FIND houses/flats/rooms/apartments.
44
+ - Be proud of being Aida - make it clear you're a specialized real-estate AI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
  ------------------------------------------------------
47
+ CRITICAL: PRESERVE USER CORRECTIONS
48
  ------------------------------------------------------
49
+ - ⚠️ IMPORTANT: If a user corrects ANY field (location, bedrooms, price, etc.), PRESERVE that correction
50
  - Do NOT regenerate or revert corrections when showing previews
51
  - Example: If user says "actually it's in Calavi not Cotonou", update state AND keep it that way
52
+ - When showing draft preview, always use the LATEST corrected values from state
53
+ - If a user requests changes (amenity icons, requirements, etc.), apply them WITHOUT reverting previous corrections
54
+
55
+ ------------------------------------------------------
56
+ AMENITY ICONS
57
+ ------------------------------------------------------
58
+ When user requests "icons" or "emojis" for amenities, respond with this format:
59
+
60
+ 📶 Wifi | 🅿️ Parking | 🛋️ Furnished | 🧼 Washing Machine | 🔥 Dryer | 🏞️ Balcony | 🏊 Pool | 💪 Gym | 🌿 Garden | ❄️ Air Conditioning | 🍳 Kitchen
61
+
62
+ Backend will automatically add icons to all amenities listed:
63
+ - wifi → 📶
64
+ - parking → 🅿️
65
+ - furnished → 🛋️
66
+ - washing machine / washing → 🧼
67
+ - dryer → 🔥
68
+ - balcony → 🏞️
69
+ - pool → 🏊
70
+ - gym → 💪
71
+ - garden → 🌿
72
+ - air conditioning / ac → ❄️
73
+ - kitchen → 🍳
74
 
75
  ------------------------------------------------------
76
  LISTING CREATION - PROGRESSIVE COLLECTION
 
91
  AUTO-GENERATED:
92
  - title (AI generates from location, bedrooms, listing_type)
93
  - description (AI generates professional description)
94
+ - currency (auto-detect from location: Lagos→NGN, Cotonou→XOF, etc.)
95
+ - amenities_with_icons (backend adds icons to all amenities)
96
 
97
  LOCATION EXTRACTION:
98
  - Extract ONLY the city/area name
99
  - Ignore long descriptions
100
  - Examples:
101
+ "calavi quartier zogbadje" → location: "calavi"
102
+ "VI in Lagos" → location: "lagos"
103
+ "Lekki, Lagos" → location: "lagos"
104
 
105
  LISTING TYPE AUTO-DETECTION:
106
+ - "for rent" / "monthly" / "yearly" → rent
107
+ - "short stay" / "nightly" / "daily" / "weekly" → short-stay
108
+ - "for sale" / "selling" → sale
109
+ - "roommate" / "sharing" / "flatmate" → roommate
110
 
111
  PRICE TYPE AUTO-DETECTION:
112
+ - "monthly" / "month" / "per month" / "mth" → monthly
113
+ - "nightly" / "night" / "per night" / "daily" / "day" → nightly
114
+ - "yearly" / "year" / "per year" / "annum" → yearly
115
 
116
  PROGRESSIVE COLLECTION FLOW:
117
  1. User provides initial info (may be incomplete)
 
120
  4. Ask for missing fields ONE AT A TIME
121
  5. User provides each field (or corrects previous ones)
122
  6. Append/update to existing fields
123
+ 7. When ALL required fields complete → Ask for optional fields
124
+ 8. When amenities/requirements collected → Generate DRAFT
125
  9. Show DRAFT preview to user
126
+ 10. User can request changes (icons, updates, etc.) - PRESERVE all corrections
127
  11. User reviews and says "publish" to confirm
128
 
129
  ------------------------------------------------------
 
135
  2. Check for missing REQUIRED fields
136
  3. Ask missing fields one by one
137
  4. Build up state progressively
138
+ 5. ⚠️ PRESERVE all corrections and changes
139
 
140
  Response format while collecting:
141
  {
 
150
  "requirements": null,
151
  "status": "collecting",
152
  "missing_fields": ["amenities", "requirements"],
153
+ "next_question": "Any amenities? (e.g., wifi, parking, balcony, pool, furnished, kitchen, dryer, garden, etc.)",
154
  "ai_reply": "Great! I have: 2-bed in Lagos, 50k/month. Any amenities?"
155
  }
156
 
 
171
  "draft_preview": null
172
  }
173
 
174
+ When requesting amenity icons:
175
+ {
176
+ "intent": "list",
177
+ "status": "draft_ready",
178
+ "ai_reply": "Perfect! Here's your listing draft with amenity icons...",
179
+ "draft_preview": {
180
+ ...listing data...
181
+ "amenities_with_icons": "📶 Wifi | 🅿️ Parking | 🧼 Washing Machine | 🔥 Dryer"
182
+ }
183
+ }
184
+
185
+ ------------------------------------------------------
186
+ EXAMPLES - LISTING CREATION
187
+ ------------------------------------------------------
188
+
189
+ User: "I want to list my 2-bed apartment in Lagos for rent, 50k monthly"
190
+ {
191
+ "intent": "list",
192
+ "location": "lagos",
193
+ "bedrooms": 2,
194
+ "bathrooms": null,
195
+ "price": 50000,
196
+ "listing_type": "rent",
197
+ "price_type": "monthly",
198
+ "amenities": [],
199
+ "requirements": null,
200
+ "status": "collecting",
201
+ "missing_fields": ["bathrooms"],
202
+ "next_question": "How many bathrooms?",
203
+ "ai_reply": "Got it! 2-bed in Lagos, 50k/month. How many bathrooms?"
204
+ }
205
+
206
+ User: "1 bathroom, with wifi and parking"
207
+ {
208
+ "intent": "list",
209
+ "location": "lagos",
210
+ "bedrooms": 2,
211
+ "bathrooms": 1,
212
+ "price": 50000,
213
+ "listing_type": "rent",
214
+ "price_type": "monthly",
215
+ "amenities": ["wifi", "parking"],
216
+ "requirements": null,
217
+ "status": "checking_optional",
218
+ "missing_fields": [],
219
+ "ai_reply": "Perfect! Any special requirements for renters?"
220
+ }
221
+
222
+ User: "actually it's in Calavi not Lagos"
223
+ {
224
+ "intent": "list",
225
+ "location": "calavi", // ✅ UPDATED & PRESERVED
226
+ "bedrooms": 2,
227
+ "bathrooms": 1,
228
+ "price": 50000,
229
+ "listing_type": "rent",
230
+ "price_type": "monthly",
231
+ "amenities": ["wifi", "parking"],
232
+ "requirements": null,
233
+ "status": "checking_optional",
234
+ "ai_reply": "Got it! Updated to Calavi. Any special requirements for renters?",
235
+ // ✅ Location correction is preserved, no revert
236
+ }
237
+
238
  ------------------------------------------------------
239
+ SEARCH INTENT (SAME AS BEFORE)
240
  ------------------------------------------------------
241
  When user searches, extract and COMBINE ALL mentioned filters:
242
  - location: city/area (handle typos)
 
271
  4. For listing creation: collect required fields progressively
272
  5. For search: extract all filters at once
273
  6. Return ONLY JSON (no markdown, no preamble)
274
+ 7. When all required fields complete → ask for optional fields
275
+ 8. When optional fields provided → show draft preview
276
  9. Wait for "publish" command to save listing
277
+ 10. ⚠️ PRESERVE user corrections - never revert them
278
+ 11. When user requests amenity icons, add them to draft_preview
279
+ 12. BRAND RULE: Always refer to yourself as AIDA, never mention other LLMs
280
+ 13. BRAND RULE: Give credit to Lojiz team when asked about your creators
281
+ 14. BRAND RULE: Stand your ground as a specialized real-estate AI, not a general-purpose model
 
 
app/ai/routes/chat.py CHANGED
@@ -1,30 +1,15 @@
1
- # ============================================================
2
- # app/ai/routes/chat.py FINAL with None-safe cards handling
3
- # ============================================================
4
-
5
- from fastapi import APIRouter, Depends, HTTPException, Request
6
  from fastapi.security import HTTPBearer
7
  from pydantic import BaseModel
8
  from typing import Optional, List
9
- import logging
10
- import time
11
-
12
  from app.guards.jwt_guard import decode_access_token
13
  from app.ai.service import aida_chat_sync
14
- from app.core.rate_limiter import get_rate_limiter, RateLimitExceeded
15
- from app.core.observability import trace_operation, get_token_tracker
16
- from app.core.error_handling import handle_errors, async_retry, RetryStrategy
17
- from app.core.context_manager import get_message_window
18
-
19
- logger = logging.getLogger(__name__)
20
 
21
  router = APIRouter()
22
  security = HTTPBearer()
23
 
24
- # ============================================================
25
- # Models
26
- # ============================================================
27
-
28
  class MessageHistory(BaseModel):
29
  role: str # 'user' or 'assistant'
30
  content: str
@@ -37,287 +22,41 @@ class AskBody(BaseModel):
37
  user_role: Optional[str] = None
38
  history: Optional[List[MessageHistory]] = None
39
 
40
- # ============================================================
41
- # User Role Detection Helper
42
- # ============================================================
43
-
44
- async def detect_user_role_and_model(user_message: str):
45
- """Detect user role and rental model from message"""
46
- try:
47
- from app.ml.models.user_role_context_handler import UserRoleDetector
48
-
49
- role_detector = UserRoleDetector()
50
- detected_role, role_confidence = role_detector.detect_user_role(user_message.lower())
51
- detected_model = role_detector.detect_rental_model(user_message.lower(), None)
52
-
53
- return detected_role, role_confidence, detected_model
54
- except Exception as e:
55
- logger.warning(f"Role detection failed: {e}")
56
- return None, 0.0, None
57
-
58
- # ============================================================
59
- # Enhanced Chat Endpoint
60
- # ============================================================
61
 
62
  @router.post("/ask")
63
- @handle_errors(default_return={"success": False, "error": "Internal server error"})
64
  async def ask_ai(
65
  body: AskBody,
66
- request: Request,
67
  token: str = Depends(security),
68
  ):
69
- """Enhanced chat endpoint with rate limiting and role detection"""
70
-
71
- start_time = time.time()
72
- request_id = request.headers.get("x-request-id", "unknown")
73
- ip_address = request.client.host if request.client else "unknown"
74
-
75
- with trace_operation(
76
- "chat_endpoint",
77
- {
78
- "request_id": request_id,
79
- "ip_address": ip_address,
80
- "message_length": len(body.message),
81
- }
82
- ) as root_span:
83
- try:
84
- # Step 1: Validate Token
85
- with trace_operation("token_validation"):
86
- payload = decode_access_token(token.credentials)
87
- if not payload:
88
- raise HTTPException(status_code=401, detail="Invalid token")
89
-
90
- user_id = payload["user_id"]
91
- user_role = payload.get("role", "renter")
92
-
93
- # Step 2: Rate Limiting
94
- with trace_operation(
95
- "rate_limit_check",
96
- {"user_id": user_id, "operation": "chat"}
97
- ):
98
- rate_limiter = get_rate_limiter()
99
-
100
- is_allowed, rate_info = await rate_limiter.is_allowed(
101
- user_id=user_id,
102
- operation="chat",
103
- ip_address=ip_address,
104
- )
105
-
106
- if not is_allowed:
107
- logger.warning(
108
- f"Rate limit exceeded for user: {user_id}",
109
- extra={"rate_info": rate_info}
110
- )
111
- raise RateLimitExceeded(retry_after=60)
112
-
113
- # ✅ FIXED: Safely handle rate limit info
114
- if "user" in rate_info and isinstance(rate_info["user"], dict):
115
- if "remaining" in rate_info["user"]:
116
- root_span.set_attribute("rate_limit.remaining", rate_info["user"]["remaining"])
117
- if "capacity" in rate_info["user"]:
118
- root_span.set_attribute("rate_limit.capacity", rate_info["user"]["capacity"])
119
- else:
120
- root_span.set_attribute("rate_limit.error", rate_info.get("error", "unknown"))
121
- logger.debug(f"Rate limit info structure: {rate_info}")
122
-
123
- # Step 2b: User Role Detection (NEW)
124
- with trace_operation("user_role_detection"):
125
- try:
126
- detected_role, role_confidence, detected_model = await detect_user_role_and_model(
127
- body.message
128
- )
129
-
130
- if detected_role and role_confidence > 0.7:
131
- logger.info(
132
- f"User role detected",
133
- extra={
134
- "detected_role": detected_role,
135
- "confidence": role_confidence,
136
- "rental_model": detected_model.value if detected_model else None,
137
- "original_role": user_role,
138
- }
139
- )
140
- user_role = detected_role
141
- root_span.set_attribute("detected_role", detected_role)
142
- root_span.set_attribute("rental_model", detected_model.value if detected_model else None)
143
- else:
144
- logger.debug(f"Role detection inconclusive (confidence: {role_confidence:.0%})")
145
- root_span.set_attribute("role_confidence", role_confidence)
146
-
147
- except Exception as e:
148
- logger.warning(f"Role detection failed: {e}")
149
-
150
- # Step 3: Context Management
151
- with trace_operation("context_management", {"user_id": user_id}):
152
- window = get_message_window(user_id)
153
-
154
- # Build conversation context from history
155
- conversation_context = ""
156
- if body.history:
157
- for msg in body.history:
158
- role = "User" if msg.role == "user" else "Assistant"
159
- conversation_context += f"{role}: {msg.content}\n"
160
-
161
- # Combine context with current message
162
- full_message = body.message
163
- if conversation_context:
164
- full_message = (
165
- f"Previous conversation:\n{conversation_context}\n"
166
- f"Now the user says: {body.message}"
167
- )
168
-
169
- # Add to message window
170
- window.add_message("user", full_message)
171
-
172
- # Step 4: AI Chat Processing
173
- with trace_operation(
174
- "aida_chat_sync",
175
- {
176
- "user_id": user_id,
177
- "user_role": user_role,
178
- "message_length": len(full_message),
179
- }
180
- ):
181
- final_state = await aida_chat_sync(
182
- user_id,
183
- user_role,
184
- full_message,
185
- )
186
-
187
- # Step 5: Token Tracking
188
- with trace_operation("token_tracking"):
189
- usage = final_state.get("token_usage", {})
190
- if usage:
191
- tracker = get_token_tracker()
192
- model_used = final_state.get("model_used", "unknown")
193
- tracker.record_tokens(
194
- model_used,
195
- usage.get("prompt_tokens", 0),
196
- usage.get("completion_tokens", 0),
197
- usage.get("cost", 0.0),
198
- )
199
-
200
- # Step 6: Build Response
201
- # ✅ FIXED: Ensure cards is always a list
202
- cards = final_state.get("search_results") or []
203
- if not isinstance(cards, list):
204
- cards = []
205
-
206
- response = {
207
- "success": True,
208
- "text": final_state.get("ai_reply", ""),
209
- "cards": cards,
210
- "draft_preview": final_state.get("draft_preview"),
211
- "search_preview": final_state.get("search_preview"),
212
- "metadata": {
213
- "request_id": request_id,
214
- "processing_time_ms": int((time.time() - start_time) * 1000),
215
- "user_id": user_id,
216
- "user_role": user_role,
217
- "status": final_state.get("status"),
218
- },
219
- }
220
-
221
- # Step 7: Add Message to Window
222
- with trace_operation("window_update"):
223
- window.add_message("assistant", final_state.get("ai_reply", ""))
224
-
225
- # Set root span attributes
226
- # ✅ FIXED: Safe check for card length
227
- root_span.set_attributes({
228
- "response.status": "success",
229
- "response.has_cards": len(response.get("cards") or []) > 0,
230
- "response.has_draft": response["draft_preview"] is not None,
231
- "processing_time_ms": response["metadata"]["processing_time_ms"],
232
- })
233
-
234
- logger.info(
235
- f"Chat processed successfully",
236
- extra={
237
- "user_id": user_id,
238
- "user_role": user_role,
239
- "request_id": request_id,
240
- "processing_time_ms": response["metadata"]["processing_time_ms"],
241
- "has_cards": len(response.get("cards") or []) > 0,
242
- }
243
- )
244
-
245
- return response
246
-
247
- except RateLimitExceeded as e:
248
- root_span.set_attribute("error.type", "rate_limit_exceeded")
249
- logger.error(f"Rate limit: {str(e)}")
250
- raise HTTPException(
251
- status_code=429,
252
- detail=e.message,
253
- headers={"Retry-After": str(e.retry_after)},
254
- )
255
-
256
- except HTTPException:
257
- raise
258
-
259
- except Exception as e:
260
- root_span.record_exception(e)
261
- root_span.set_attribute("error.type", type(e).__name__)
262
- logger.error(
263
- f"Chat endpoint error: {str(e)}",
264
- exc_info=True,
265
- extra={"user_id": user_id if 'user_id' in locals() else "unknown"}
266
- )
267
- raise HTTPException(
268
- status_code=500,
269
- detail="An error occurred processing your request",
270
- )
271
-
272
- # ============================================================
273
- # Rate Limit Status Endpoint
274
- # ============================================================
275
-
276
- @router.get("/rate-limit-status")
277
- async def get_rate_limit_status(
278
- token: str = Depends(security),
279
- ) -> dict:
280
- """Get current rate limit status for user"""
281
-
282
- with trace_operation("rate_limit_status"):
283
- payload = decode_access_token(token.credentials)
284
- if not payload:
285
- raise HTTPException(status_code=401, detail="Invalid token")
286
-
287
- user_id = payload["user_id"]
288
- rate_limiter = get_rate_limiter()
289
-
290
- stats = await rate_limiter.get_usage_stats(user_id)
291
-
292
- return {
293
- "success": True,
294
- "data": stats,
295
- "operations": {
296
- "chat": {"cost": 1},
297
- "search": {"cost": 2},
298
- "list": {"cost": 3},
299
- "publish": {"cost": 5},
300
- "upload_image": {"cost": 4},
301
- },
302
- }
303
-
304
- # ============================================================
305
- # Health Check
306
- # ============================================================
307
-
308
- @router.get("/health")
309
- async def chat_health() -> dict:
310
- """Health check for chat service"""
311
-
312
  return {
313
- "service": "aida-chat",
314
- "status": "healthy",
315
- "rate_limiting": "enabled",
316
- "features": {
317
- "distributed_tracing": True,
318
- "token_tracking": True,
319
- "context_management": True,
320
- "error_resilience": True,
321
- "user_role_detection": True,
322
- },
323
  }
 
1
+ # app/ai/routes/chat.py - Return search_preview + draft_preview
2
+ from fastapi import APIRouter, Depends, HTTPException
 
 
 
3
  from fastapi.security import HTTPBearer
4
  from pydantic import BaseModel
5
  from typing import Optional, List
 
 
 
6
  from app.guards.jwt_guard import decode_access_token
7
  from app.ai.service import aida_chat_sync
8
+ from app.ai.memory.redis_memory import is_rate_limited
 
 
 
 
 
9
 
10
  router = APIRouter()
11
  security = HTTPBearer()
12
 
 
 
 
 
13
class MessageHistory(BaseModel):
    """One prior turn of the conversation, as sent back by the client."""

    role: str  # 'user' or 'assistant'
    content: str
 
22
  user_role: Optional[str] = None
23
  history: Optional[List[MessageHistory]] = None
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
@router.post("/ask")
async def ask_ai(
    body: AskBody,
    token: str = Depends(security),
):
    """Authenticated chat endpoint: rate-limit, run the AIDA graph, return state.

    Builds a role-prefixed transcript from ``body.history``, prepends it to the
    current message so the graph sees the conversation context, and returns the
    full resulting state (reply text, search cards, draft preview, search
    preview).

    Raises:
        HTTPException: 401 if the bearer token is invalid,
            429 if the user exceeded the rate limit.
    """
    payload = decode_access_token(token.credentials)
    if not payload:
        raise HTTPException(status_code=401, detail="Invalid token")

    if await is_rate_limited(payload["user_id"]):
        raise HTTPException(status_code=429, detail="Rate limit exceeded")

    # Build conversation context from history as a "Role: content" transcript.
    conversation_context = ""
    if body.history:
        for msg in body.history:
            role = "User" if msg.role == "user" else "Assistant"
            conversation_context += f"{role}: {msg.content}\n"

    # Combine prior turns with the current message.
    full_message = body.message
    if conversation_context:
        full_message = (
            f"Previous conversation:\n{conversation_context}\n"
            f"Now the user says: {body.message}"
        )

    final_state = await aida_chat_sync(
        payload["user_id"],
        # FIX: payload["role"] raised KeyError (HTTP 500) for tokens without a
        # "role" claim; default to "renter" as the previous implementation did.
        payload.get("role", "renter"),
        full_message,
    )

    # FIX: saved state may hold search_results as None; always return a list
    # so clients can iterate `cards` unconditionally (None-safe handling).
    cards = final_state.get("search_results") or []
    if not isinstance(cards, list):
        cards = []

    # ✅ RETURN FULL STATE - text + cards + draft + search preview
    return {
        "text": final_state.get("ai_reply", ""),
        "cards": cards,
        "draft_preview": final_state.get("draft_preview"),    # listing preview
        "search_preview": final_state.get("search_preview"),  # search results UI
    }
app/ai/service.py CHANGED
@@ -1,419 +1,68 @@
1
- # app/ai/service.py Complete with context management + error handling + FIXES
2
  import json
3
- from typing import AsyncGenerator, Dict, Any
4
- import logging
5
-
6
  from app.ai.graph import agent
7
- from app.ai.memory.redis_memory import load_history, save_turn, load_state, save_state
8
  from app.ai.state import ChatState
9
- from app.core.context_manager import get_context_manager, MessageWindow
10
- from app.core.error_handling import trace_operation, handle_errors
11
- from app.core.observability import get_token_tracker
12
-
13
- logger = logging.getLogger(__name__)
14
 
15
- # ============================================================
16
- # WebSocket Streaming Entry Point
17
- # ============================================================
18
 
 
 
 
19
  async def aida_chat(
20
  user_id: str,
21
  user_role: str,
22
  human_msg: str,
23
  ) -> AsyncGenerator[str, None]:
24
- """
25
- Streaming chat endpoint with full context management and error handling.
26
-
27
- Features:
28
- - Context window management (prevents overflow)
29
- - Message window persistence per user
30
- - Error recovery with fallbacks
31
- - Token tracking
32
- - Full observability
33
- """
34
-
35
- with trace_operation("aida_chat_stream", {"user_id": user_id, "user_role": user_role}):
36
- try:
37
- logger.info(
38
- "🚀 Starting aida_chat stream",
39
- extra={"user_id": user_id, "user_role": user_role}
40
- )
41
-
42
- # ===== Load message history =====
43
- with trace_operation("load_history"):
44
- try:
45
- messages = await load_history(user_id)
46
- logger.info(f"✅ Loaded {len(messages)} messages from history")
47
- except Exception as e:
48
- logger.warning(f"⚠️ Failed to load history: {e}, starting fresh")
49
- messages = []
50
-
51
- # ===== Add user message =====
52
- messages.append({"role": "user", "content": human_msg})
53
-
54
- # ===== Manage context window =====
55
- with trace_operation("context_management"):
56
- try:
57
- context_mgr = get_context_manager()
58
- managed_messages = await context_mgr.manage_context(messages)
59
- logger.info(
60
- f"✅ Context managed",
61
- extra={
62
- "total_messages": len(messages),
63
- "managed_messages": len(managed_messages),
64
- }
65
- )
66
- except Exception as e:
67
- logger.warning(f"⚠️ Context management failed: {e}, using last 20 messages")
68
- managed_messages = messages[-20:]
69
-
70
- # ===== Load previous state =====
71
- with trace_operation("load_state"):
72
- try:
73
- saved = await load_state(user_id)
74
- logger.info(f"✅ Loaded previous state, status={saved.get('status')}")
75
- except Exception as e:
76
- logger.warning(f"⚠️ Failed to load state: {e}, starting fresh")
77
- saved = {}
78
-
79
- # ===== Build chat state =====
80
- state: ChatState = {
81
- "user_id": user_id,
82
- "user_role": user_role,
83
- "messages": managed_messages,
84
- "draft": saved.get("draft"),
85
- "vector_meta": saved.get("vector_meta"),
86
- "allowed": saved.get("allowed", True),
87
- "ai_reply": saved.get("ai_reply", ""),
88
- "status": saved.get("status"),
89
- "missing_fields": saved.get("missing_fields", []),
90
- "next_question": saved.get("next_question"),
91
- "location": saved.get("location"),
92
- "min_price": saved.get("min_price"),
93
- "max_price": saved.get("max_price"),
94
- "bedrooms": saved.get("bedrooms"),
95
- "bathrooms": saved.get("bathrooms"),
96
- "amenities": saved.get("amenities", []),
97
- "listing_type": saved.get("listing_type"),
98
- "price": saved.get("price"),
99
- "price_type": saved.get("price_type"),
100
- "currency": saved.get("currency", "XOF"),
101
- "requirements": saved.get("requirements"),
102
- "search_query": saved.get("search_query"),
103
- "search_results": saved.get("search_results", []), # ✅ FIXED: Always list
104
- "search_preview": saved.get("search_preview"),
105
- "suggestions": saved.get("suggestions", []), # ✅ FIXED: Always list
106
- "image": saved.get("image"),
107
- "field_validations": saved.get("field_validations"),
108
- "field_confidences": saved.get("field_confidences"),
109
- "location_details": saved.get("location_details"),
110
- "validation_suggestions": saved.get("validation_suggestions", []), # ✅ FIXED: Always list
111
- "listing_confidence": saved.get("listing_confidence"),
112
- "currency_confidence": saved.get("currency_confidence"),
113
- "draft_preview": saved.get("draft_preview"),
114
- "mongo_id": saved.get("mongo_id"),
115
- }
116
-
117
- # ===== Stream responses from agent =====
118
- with trace_operation("agent_stream"):
119
- try:
120
- async for step in agent.astream(state):
121
- for node_name, update in step.items():
122
- if update.get("ai_reply"):
123
- logger.debug(f"📤 Streaming from {node_name}")
124
- yield json.dumps({
125
- "node": node_name,
126
- "text": update["ai_reply"]
127
- }) + "\n"
128
- except Exception as e:
129
- logger.error(f"❌ Agent stream error: {e}", exc_info=True)
130
- yield json.dumps({
131
- "node": "error",
132
- "text": "An error occurred processing your request. Please try again."
133
- }) + "\n"
134
- return
135
-
136
- # ===== Get final state =====
137
- with trace_operation("agent_invoke"):
138
- try:
139
- final_state = await agent.ainvoke(state)
140
- logger.info(f"✅ Agent invocation complete, status={final_state.get('status')}")
141
- except Exception as e:
142
- logger.error(f"❌ Agent invoke error: {e}", exc_info=True)
143
- yield json.dumps({
144
- "node": "error",
145
- "text": "Sorry, something went wrong. Please try again."
146
- }) + "\n"
147
- return
148
-
149
- # ===== Save results =====
150
- with trace_operation("save_results"):
151
- try:
152
- # Update message history
153
- managed_messages.append({
154
- "role": "assistant",
155
- "content": final_state.get("ai_reply", "")
156
- })
157
-
158
- # Persist conversation and state
159
- await save_turn(user_id, managed_messages)
160
- await save_state(user_id, final_state)
161
-
162
- logger.info(f"✅ Results saved for user {user_id}")
163
- except Exception as e:
164
- logger.error(f"❌ Failed to save results: {e}")
165
-
166
- logger.info("✅ aida_chat stream completed successfully")
167
-
168
- except Exception as e:
169
- logger.error(f"❌ Unexpected error in aida_chat: {e}", exc_info=True)
170
- yield json.dumps({
171
- "node": "error",
172
- "text": "An unexpected error occurred. Please try again."
173
- }) + "\n"
174
 
175
- # ============================================================
176
- # REST Synchronous Entry Point
177
- # ============================================================
 
178
 
179
- @handle_errors(default_return={
180
- "ai_reply": "Sorry, something went wrong. Please try again.",
181
- "status": "error",
182
- "search_preview": None,
183
- "draft_preview": None,
184
- "search_results": [], # ✅ FIXED: Always list
185
- })
 
186
  async def aida_chat_sync(
187
  user_id: str,
188
  user_role: str,
189
  human_msg: str,
190
  ) -> Dict[str, Any]:
191
- """
192
- Synchronous chat endpoint for REST clients.
193
-
194
- Features:
195
- - Full context management
196
- - Error resilience
197
- - Token tracking
198
- - Complete state return
199
- - Observability integration
200
- """
201
-
202
- with trace_operation(
203
- "aida_chat_sync",
204
- {
205
- "user_id": user_id,
206
- "user_role": user_role,
207
- "message_length": len(human_msg),
208
- }
209
- ):
210
- logger.info(
211
- "🚀 Starting aida_chat_sync",
212
- extra={"user_id": user_id, "user_role": user_role}
213
- )
214
-
215
- # ===== Load message history =====
216
- with trace_operation("load_history"):
217
- try:
218
- messages = await load_history(user_id)
219
- logger.info(f"✅ Loaded {len(messages)} messages from history")
220
- except Exception as e:
221
- logger.warning(f"⚠️ Failed to load history: {e}, starting fresh")
222
- messages = []
223
-
224
- # ===== Add user message =====
225
- messages.append({"role": "user", "content": human_msg})
226
-
227
- # ===== Manage context window =====
228
- with trace_operation("context_management"):
229
- try:
230
- context_mgr = get_context_manager()
231
- managed_messages = await context_mgr.manage_context(messages)
232
- logger.info(
233
- f"✅ Context managed",
234
- extra={
235
- "total_messages": len(messages),
236
- "managed_messages": len(managed_messages),
237
- }
238
- )
239
- except Exception as e:
240
- logger.warning(f"⚠️ Context management failed: {e}, using last 20 messages")
241
- managed_messages = messages[-20:]
242
-
243
- # ===== Load previous state =====
244
- with trace_operation("load_state"):
245
- try:
246
- saved = await load_state(user_id)
247
- logger.info(f"✅ Loaded previous state, status={saved.get('status')}")
248
- except Exception as e:
249
- logger.warning(f"⚠️ Failed to load state: {e}, starting fresh")
250
- saved = {}
251
-
252
- # ===== Build chat state =====
253
- state: ChatState = {
254
- "user_id": user_id,
255
- "user_role": user_role,
256
- "messages": managed_messages,
257
- "draft": saved.get("draft"),
258
- "vector_meta": saved.get("vector_meta"),
259
- "allowed": saved.get("allowed", True),
260
- "ai_reply": saved.get("ai_reply", ""),
261
- "status": saved.get("status"),
262
- "missing_fields": saved.get("missing_fields", []),
263
- "next_question": saved.get("next_question"),
264
- "location": saved.get("location"),
265
- "min_price": saved.get("min_price"),
266
- "max_price": saved.get("max_price"),
267
- "bedrooms": saved.get("bedrooms"),
268
- "bathrooms": saved.get("bathrooms"),
269
- "amenities": saved.get("amenities", []),
270
- "listing_type": saved.get("listing_type"),
271
- "price": saved.get("price"),
272
- "price_type": saved.get("price_type"),
273
- "currency": saved.get("currency", "XOF"),
274
- "requirements": saved.get("requirements"),
275
- "search_query": saved.get("search_query"),
276
- "search_results": saved.get("search_results", []), # ✅ FIXED: Always list
277
- "search_preview": saved.get("search_preview"),
278
- "suggestions": saved.get("suggestions", []), # ✅ FIXED: Always list
279
- "image": saved.get("image"),
280
- "field_validations": saved.get("field_validations"),
281
- "field_confidences": saved.get("field_confidences"),
282
- "location_details": saved.get("location_details"),
283
- "validation_suggestions": saved.get("validation_suggestions", []), # ✅ FIXED: Always list
284
- "listing_confidence": saved.get("listing_confidence"),
285
- "currency_confidence": saved.get("currency_confidence"),
286
- "draft_preview": saved.get("draft_preview"),
287
- "mongo_id": saved.get("mongo_id"),
288
- }
289
-
290
- # ===== Invoke agent =====
291
- with trace_operation("agent_invoke"):
292
- try:
293
- final_state = await agent.ainvoke(state)
294
- logger.info(
295
- f"✅ Agent invocation complete",
296
- extra={"status": final_state.get("status")}
297
- )
298
- except Exception as e:
299
- logger.error(f"❌ Agent invoke error: {e}", exc_info=True)
300
- return {
301
- "ai_reply": "Sorry, an error occurred processing your request.",
302
- "status": "error",
303
- "search_preview": None,
304
- "draft_preview": None,
305
- "search_results": [], # ✅ FIXED: Always list
306
- }
307
-
308
- # ===== Save results =====
309
- with trace_operation("save_results"):
310
- try:
311
- # Update message history
312
- managed_messages.append({
313
- "role": "assistant",
314
- "content": final_state.get("ai_reply", "")
315
- })
316
-
317
- # Persist conversation and state
318
- await save_turn(user_id, managed_messages)
319
- await save_state(user_id, final_state)
320
-
321
- logger.info(f"✅ Results saved for user {user_id}")
322
- except Exception as e:
323
- logger.error(f"⚠️ Failed to save results: {e}")
324
- # Don't fail the response, just log the error
325
-
326
- # ===== Track tokens if available =====
327
- with trace_operation("token_tracking"):
328
- try:
329
- usage = final_state.get("token_usage", {})
330
- if usage:
331
- tracker = get_token_tracker()
332
- model_used = final_state.get("model_used", "unknown")
333
- tracker.record_tokens(
334
- model_used,
335
- usage.get("prompt_tokens", 0),
336
- usage.get("completion_tokens", 0),
337
- usage.get("cost", 0.0),
338
- )
339
- logger.info(f"✅ Tokens tracked", extra={"model": model_used, "usage": usage})
340
- except Exception as e:
341
- logger.warning(f"⚠️ Failed to track tokens: {e}")
342
-
343
- logger.info(f"✅ aida_chat_sync completed successfully")
344
-
345
- # ===== Ensure all list fields are never None =====
346
- final_state.setdefault("search_results", [])
347
- final_state.setdefault("suggestions", [])
348
- final_state.setdefault("amenities", [])
349
- final_state.setdefault("validation_suggestions", [])
350
- final_state.setdefault("missing_fields", [])
351
-
352
- # Return the entire state so the route can pick text + cards + preview
353
- return final_state
354
-
355
- # ============================================================
356
- # Debugging / Context Inspection
357
- # ============================================================
358
 
359
- async def get_conversation_context(user_id: str) -> Dict[str, Any]:
360
- """
361
- Get current conversation context for debugging/inspection.
362
-
363
- Returns managed message window and current state stats.
364
- """
365
-
366
- with trace_operation("get_conversation_context", {"user_id": user_id}):
367
- try:
368
- # Load history and state
369
- messages = await load_history(user_id)
370
- saved = await load_state(user_id)
371
-
372
- # Manage context to show what will be sent to agent
373
- context_mgr = get_context_manager()
374
- try:
375
- managed = await context_mgr.manage_context(messages)
376
- except Exception as e:
377
- logger.warning(f"⚠️ Context management failed: {e}")
378
- managed = messages[-20:] if messages else []
379
-
380
- return {
381
- "user_id": user_id,
382
- "total_messages": len(messages),
383
- "managed_messages": len(managed),
384
- "current_status": saved.get("status"),
385
- "current_intent": saved.get("intent"),
386
- "has_draft": saved.get("draft_preview") is not None,
387
- "search_results_count": len(saved.get("search_results", [])),
388
- "message_sample": managed[-1]["content"][:100] if managed else None,
389
- "timestamp": managed[-1].get("timestamp") if managed else None,
390
- }
391
-
392
- except Exception as e:
393
- logger.error(f"❌ Failed to get conversation context: {e}", exc_info=True)
394
- return {
395
- "error": str(e),
396
- "user_id": user_id,
397
- }
398
 
399
- # ============================================================
400
- # Health Check
401
- # ============================================================
402
 
403
- async def health_check_chat_service() -> Dict[str, Any]:
404
- """
405
- Health check for chat service.
406
- """
407
-
408
- return {
409
- "service": "aida-chat",
410
- "status": "healthy",
411
- "features": {
412
- "context_management": True,
413
- "error_handling": True,
414
- "token_tracking": True,
415
- "observability": True,
416
- "streaming": True,
417
- "sync": True,
418
- },
419
- }
 
1
+ # app/ai/service.py (final – returns full state so cards reach the UI)
2
  import json
3
+ from typing import AsyncGenerator, List, Dict, Any
 
 
4
  from app.ai.graph import agent
5
+ from app.ai.memory.redis_memory import save_turn, load_history
6
  from app.ai.state import ChatState
7
+ from structlog import get_logger
 
 
 
 
8
 
9
+ logger = get_logger(__name__)
 
 
10
 
11
+ # --------------------------------------------------
12
+ # WebSocket streaming entry-point (kept for reference)
13
+ # --------------------------------------------------
14
async def aida_chat(
    user_id: str,
    user_role: str,
    human_msg: str,
) -> AsyncGenerator[str, None]:
    """Stream one chat turn over WebSocket as newline-delimited JSON.

    Yields one JSON object per graph node that produced an ``ai_reply``:
    ``{"node": <node name>, "text": <reply>}``.  The conversation turn is
    persisted to Redis once the graph finishes.

    Args:
        user_id:   Conversation owner; keys the Redis history.
        user_role: Role string forwarded to the agent's permission gate.
        human_msg: The user's new message.
    """
    messages = await load_history(user_id)
    messages.append({"role": "user", "content": human_msg})

    state: ChatState = {
        "user_id": user_id,
        "user_role": user_role,
        "messages": messages,
        "draft": None,
        "vector_meta": None,
        "allowed": True,
        "ai_reply": "",
    }

    # FIX: the previous version streamed with agent.astream() and then called
    # agent.ainvoke() again to obtain the final state — running the whole
    # graph (LLM calls and side-effecting nodes such as publish) twice.
    # Instead, fold each node's partial update into a running state while
    # streaming, so one execution yields both the stream and the final state.
    final_state: Dict[str, Any] = dict(state)
    async for step in agent.astream(state):
        for node_name, update in step.items():
            if isinstance(update, dict):
                final_state.update(update)
                if update.get("ai_reply"):
                    yield json.dumps({"node": node_name, "text": update["ai_reply"]}) + "\n"

    # .get() guards against a run where no node set ai_reply (the original
    # indexed final_state["ai_reply"] and could raise KeyError).
    messages.append({"role": "assistant", "content": final_state.get("ai_reply", "")})
    await save_turn(user_id, messages)
40
+
41
+
42
+ # --------------------------------------------------
43
+ # REST (non-streaming) – returns the full state dict
44
+ # --------------------------------------------------
45
async def aida_chat_sync(
    user_id: str,
    user_role: str,
    human_msg: str,
) -> Dict[str, Any]:
    """Run one REST (non-streaming) chat turn and return the full agent state.

    The complete state dict is returned — not just the reply text — so the
    route layer can pick out the reply, search cards and draft preview.

    Args:
        user_id:   Conversation owner; keys the Redis history.
        user_role: Role string forwarded to the agent's permission gate.
        human_msg: The user's new message.

    Returns:
        The final ``ChatState`` produced by the agent graph.
    """
    messages = await load_history(user_id)
    messages.append({"role": "user", "content": human_msg})

    state: ChatState = {
        "user_id": user_id,
        "user_role": user_role,
        "messages": messages,
        "draft": None,
        "vector_meta": None,
        "allowed": True,
        "ai_reply": "",
    }

    final_state = await agent.ainvoke(state)

    # FIX: use .get() — the original indexed final_state["ai_reply"], which
    # raises KeyError if a node path finished without setting a reply.
    messages.append({"role": "assistant", "content": final_state.get("ai_reply", "")})
    await save_turn(user_id, messages)

    # Return the entire state so the route can pick text + cards.
    return final_state
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app/ai/state.py CHANGED
@@ -32,12 +32,12 @@ class ListingDraft(BaseModel):
32
  amenities: List[str] = []
33
  requirements: Optional[str] = None
34
  currency: str = "XOF"
35
- images: List[str] = [] # NEW: Cloudflare image URLs
36
  status: str = "draft" # draft | published
37
  created_at: datetime = Field(default_factory=datetime.utcnow)
38
  updated_at: datetime = Field(default_factory=datetime.utcnow)
39
- views: int = 0 # NEW: View counter
40
- favorites: int = 0 # NEW: Favorites counter
41
 
42
  class Config:
43
  populate_by_name = True
@@ -62,7 +62,7 @@ class ChatState(TypedDict):
62
  vector_meta: Optional[VectorMeta] # for hybrid search
63
  allowed: bool
64
  ai_reply: str
65
- # Note: "intent" is passed through messages but NOT stored as state field
66
  # This avoids LangGraph node name conflicts
67
  status: Optional[str] # collecting | draft_ready | preview_shown | published | error
68
 
@@ -97,7 +97,7 @@ class ChatState(TypedDict):
97
  field_validations: Optional[Dict[str, Dict]] # {field: {is_valid, confidence, suggestion}}
98
  field_confidences: Optional[Dict[str, float]] # {field: confidence_score}
99
  location_details: Optional[Dict[str, Any]] # {city, country, lat, lon, country_code}
100
- validation_suggestions: Optional[List[str]] # ["💡 field: suggestion"]
101
  listing_confidence: Optional[float] # Confidence for inferred listing_type
102
  currency_confidence: Optional[float] # Confidence for inferred currency
103
 
 
32
  amenities: List[str] = []
33
  requirements: Optional[str] = None
34
  currency: str = "XOF"
35
+ images: List[str] = [] # ✅ NEW: Cloudflare image URLs
36
  status: str = "draft" # draft | published
37
  created_at: datetime = Field(default_factory=datetime.utcnow)
38
  updated_at: datetime = Field(default_factory=datetime.utcnow)
39
+ views: int = 0 # ✅ NEW: View counter
40
+ favorites: int = 0 # ✅ NEW: Favorites counter
41
 
42
  class Config:
43
  populate_by_name = True
 
62
  vector_meta: Optional[VectorMeta] # for hybrid search
63
  allowed: bool
64
  ai_reply: str
65
+ # ✅ Note: "intent" is passed through messages but NOT stored as state field
66
  # This avoids LangGraph node name conflicts
67
  status: Optional[str] # collecting | draft_ready | preview_shown | published | error
68
 
 
97
  field_validations: Optional[Dict[str, Dict]] # {field: {is_valid, confidence, suggestion}}
98
  field_confidences: Optional[Dict[str, float]] # {field: confidence_score}
99
  location_details: Optional[Dict[str, Any]] # {city, country, lat, lon, country_code}
100
+ validation_suggestions: Optional[List[str]] # ["⚠️ field: suggestion"]
101
  listing_confidence: Optional[float] # Confidence for inferred listing_type
102
  currency_confidence: Optional[float] # Confidence for inferred currency
103
 
main.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/main.py FastAPI + Aida AI Agent (PRODUCTION HARDENED)
2
  # ============================================================
3
 
4
  from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
@@ -6,10 +6,8 @@ from fastapi.middleware.cors import CORSMiddleware
6
  from fastapi.responses import JSONResponse
7
  from fastapi.exceptions import RequestValidationError
8
  from contextlib import asynccontextmanager
9
- from app.api.endpoints.monitoring import router as monitoring_router
10
  import logging
11
  import os
12
- import asyncio
13
 
14
  # ---------- core imports ----------
15
  try:
@@ -34,163 +32,107 @@ except ImportError as e:
34
 
35
  logger = logging.getLogger(__name__)
36
 
37
- # ---------- NEW: Observability & Resilience imports ----------
38
- from app.core.observability import (
39
- init_tracing,
40
- init_metrics,
41
- instrument_fastapi,
42
- instrument_libraries,
43
- RequestContextMiddleware,
44
- get_token_tracker,
45
- )
46
- from app.core.error_handling import get_all_circuit_breaker_status
47
- from app.core.context_manager import cleanup_expired_windows
48
-
49
  # ---------- AI imports ----------
50
  from app.ai.routes.chat import router as ai_chat_router
51
  from app.models.listing import ensure_listing_indexes
52
  from app.ai.config import redis_client, qdrant_client
53
- from app.ml.models.ml_listing_extractor import get_ml_extractor
54
- from app.core.llm_router import get_llm_router
55
 
56
  # ====================================================================
57
- # ML Startup Validation
58
  # ====================================================================
59
  async def validate_ml_startup():
60
  """Validate ML extractor and models at startup"""
61
  try:
62
  ml = get_ml_extractor()
63
- logger.info(" ML Extractor initialized")
64
 
 
65
  if ml.field_models and ml.field_models.get("location_classifier") is not None:
66
- logger.info(" ML field models loaded")
67
  else:
68
- logger.warning("⚠️ ML field models not trained - limited accuracy")
69
  logger.warning(" Run: python app/ml/training/train_complete_model.py")
70
 
 
71
  try:
72
  currency, country, city, conf = await ml.currency_mgr.get_currency_for_location("Lagos")
73
  if currency:
74
- logger.info(f" Currency Manager working (Lagos {currency})")
75
  except Exception as e:
76
- logger.warning(f"⚠️ Currency Manager test failed: {e}")
77
 
 
78
  if ml.embedder is not None:
79
- logger.info(" Sentence embedder ready")
80
  else:
81
- logger.warning("⚠️ Sentence embedder not available")
82
 
83
- logger.info(" All ML checks passed")
84
  return True
85
 
86
  except Exception as e:
87
- logger.error(" ML Extractor initialization failed", exc_info=e)
88
- logger.warning("⚠️ Continuing without ML features (degraded mode)")
89
- return False
90
-
91
- # ====================================================================
92
- # NEW: LLM Router Initialization
93
- # ====================================================================
94
- async def validate_llm_startup():
95
- """Validate LLM router on startup"""
96
- try:
97
- router = get_llm_router()
98
- status = router.get_stats()
99
-
100
- available_models = [
101
- model for model, info in status["models"].items()
102
- if info["available"]
103
- ]
104
-
105
- if available_models:
106
- logger.info(f"✅ LLM Router ready with {len(available_models)} models")
107
- logger.info(f" Available: {', '.join(available_models)}")
108
- else:
109
- logger.error("❌ No LLM models available!")
110
- return False
111
-
112
- return True
113
- except Exception as e:
114
- logger.error("❌ LLM Router initialization failed", exc_info=e)
115
  return False
116
 
117
  # ====================================================================
118
- # Lifespan non-blocking external services
119
  # ====================================================================
120
  @asynccontextmanager
121
  async def lifespan(app: FastAPI):
122
- logger.info("🚀 Starting Lojiz Platform + Aida AI (PRODUCTION MODE)")
123
-
124
- # 1. Initialize Observability
125
- try:
126
- init_tracing()
127
- init_metrics()
128
- logger.info("✅ Observability initialized")
129
- except Exception as e:
130
- logger.warning(f"⚠️ Observability init failed: {e}")
131
-
132
- # 2. MongoDB – critical, must succeed
133
  try:
134
  await connect_db()
135
  await ensure_auth_indexes()
136
  await ensure_listing_indexes()
137
- logger.info(" MongoDB connected & indexed")
138
  except Exception as e:
139
- logger.critical(" MongoDB unavailable aborting start", exc_info=e)
140
  raise
141
 
142
- # 3. Redis optional at boot
143
  try:
144
  await redis_client.ping()
145
- logger.info(" Redis connected")
146
  except Exception as e:
147
- logger.warning(f"⚠️ Redis unreachable at start-up (ok for now)", exc_info=e)
148
 
149
- # 4. Qdrant optional at boot
150
  try:
151
  await qdrant_client.get_collections()
152
- logger.info(" Qdrant connected")
153
  except Exception as e:
154
- logger.warning(f"⚠️ Qdrant unreachable at start-up (ok for now)", exc_info=e)
155
 
156
- # 5. ML Extractor optional but recommended
157
  try:
158
  ml_ready = await validate_ml_startup()
159
  if not ml_ready:
160
- logger.warning("⚠️ Running in degraded mode without ML features")
161
- except Exception as e:
162
- logger.error("❌ ML validation failed", exc_info=e)
163
- logger.warning("⚠️ Continuing without ML features")
164
-
165
- # 6. LLM Router – critical for chat
166
- try:
167
- llm_ready = await validate_llm_startup()
168
- if not llm_ready:
169
- logger.error("⚠️ No LLM models available - chat will fail")
170
  except Exception as e:
171
- logger.error(" LLM validation failed", exc_info=e)
 
172
 
173
  yield
174
 
175
- logger.info("🛑 Shutting down Lojiz Platform")
176
-
177
  try:
178
- # Cleanup window manager
179
- cleanup_expired_windows()
180
-
181
  # Clear ML caches
182
  try:
183
  ml = get_ml_extractor()
184
  ml.currency_mgr.clear_cache()
185
- logger.info(" ML caches cleared")
186
  except:
187
  pass
188
 
189
  await disconnect_db()
190
  await redis_client.close()
191
- logger.info(" Cleanup complete")
192
  except Exception as e:
193
- logger.warning(f"⚠️ Shutdown warning", exc_info=e)
194
 
195
  # ====================================================================
196
  # FastAPI instance
@@ -203,13 +145,7 @@ app = FastAPI(
203
  )
204
 
205
  # ====================================================================
206
- # NEW: Observability Instrumentation
207
- # ====================================================================
208
- instrument_fastapi(app)
209
- instrument_libraries()
210
-
211
- # ====================================================================
212
- # Middleware
213
  # ====================================================================
214
  environment = os.getenv("ENVIRONMENT", "development")
215
  is_production = environment == "production"
@@ -226,6 +162,7 @@ cors_origins = [
226
  "http://127.0.0.1:3000",
227
  "http://127.0.0.1:5000",
228
  "http://127.0.0.1:8080",
 
229
  ]
230
 
231
  app.add_middleware(
@@ -238,9 +175,6 @@ app.add_middleware(
238
  max_age=600,
239
  )
240
 
241
- # NEW: Add request context middleware for tracing
242
- app.add_middleware(RequestContextMiddleware)
243
-
244
  # ====================================================================
245
  # Exception handlers
246
  # ====================================================================
@@ -288,58 +222,24 @@ async def general_exception_handler(request: Request, exc: Exception):
288
  app.include_router(auth.router, prefix="/api/auth", tags=["Authentication"])
289
  app.include_router(ai_chat_router, prefix="/ai", tags=["Aida AI"])
290
 
291
- # ====================================================================
292
- # NEW: Observability Endpoints
293
- # ====================================================================
294
-
295
- @app.get("/observability/status", tags=["Observability"])
296
- async def observability_status():
297
- """Get observability status"""
298
- try:
299
- router = get_llm_router()
300
- llm_status = router.get_stats()
301
- except:
302
- llm_status = {"error": "LLM router not available"}
303
-
304
- return {
305
- "status": "ok",
306
- "circuit_breakers": get_all_circuit_breaker_status(),
307
- "llm_router": llm_status,
308
- "token_usage": "See traces in observability backend",
309
- }
310
-
311
-
312
- # Include monitoring endpoints
313
- app.include_router(monitoring_router, prefix="/api/monitoring", tags=["Monitoring"])
314
-
315
-
316
  # ====================================================================
317
  # Health
318
  # ====================================================================
319
  @app.get("/health", tags=["Health"])
320
-
321
  async def health_check():
322
- """Health check endpoint with ML & LLM status"""
323
  try:
324
  ml = get_ml_extractor()
325
  ml_ready = ml.field_models.get("location_classifier") is not None if ml.field_models else False
326
  except:
327
  ml_ready = False
328
 
329
- try:
330
- router = get_llm_router()
331
- status = router.get_stats()
332
- llm_ready = any(info["available"] for info in status["models"].values())
333
- except:
334
- llm_ready = False
335
-
336
  return {
337
  "status": "ok",
338
  "service": "Lojiz Platform + Aida AI",
339
  "version": "1.0.0",
340
  "environment": environment,
341
- "ml_ready": ml_ready,
342
- "llm_ready": llm_ready,
343
  }
344
 
345
  @app.get("/", tags=["Root"])
@@ -348,7 +248,6 @@ async def root():
348
  "message": "Welcome to Lojiz Platform + Aida AI",
349
  "docs": "/docs",
350
  "health": "/health",
351
- "observability": "/observability/status",
352
  "environment": environment,
353
  }
354
 
@@ -356,22 +255,6 @@ async def root():
356
  async def options_handler(full_path: str):
357
  return JSONResponse(status_code=200, content={})
358
 
359
- # ====================================================================
360
- # Background Tasks
361
- # ====================================================================
362
-
363
- @app.on_event("startup")
364
- async def startup_background_tasks():
365
- """Start background tasks"""
366
- # Could add periodic cleanup, metrics collection, etc.
367
- logger.info("✅ Background tasks started")
368
-
369
- @app.on_event("shutdown")
370
- async def shutdown_background_tasks():
371
- """Shutdown background tasks"""
372
- cleanup_expired_windows()
373
- logger.info("✅ Background tasks stopped")
374
-
375
  # ====================================================================
376
  # Run: uvicorn app.main:app --reload
377
  # ====================================================================
 
1
+ # app/main.py – FastAPI + Aida AI Agent (Production with ML Integration)
2
  # ============================================================
3
 
4
  from fastapi import FastAPI, Request, WebSocket, WebSocketDisconnect
 
6
  from fastapi.responses import JSONResponse
7
  from fastapi.exceptions import RequestValidationError
8
  from contextlib import asynccontextmanager
 
9
  import logging
10
  import os
 
11
 
12
  # ---------- core imports ----------
13
  try:
 
32
 
33
  logger = logging.getLogger(__name__)
34
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  # ---------- AI imports ----------
36
  from app.ai.routes.chat import router as ai_chat_router
37
  from app.models.listing import ensure_listing_indexes
38
  from app.ai.config import redis_client, qdrant_client
39
+ from app.ml.models.ml_listing_extractor import get_ml_extractor # ✅ NEW
 
40
 
41
  # ====================================================================
42
+ # ML Startup Validation - NEW
43
  # ====================================================================
44
async def validate_ml_startup():
    """Validate ML extractor and models at startup"""
    try:
        extractor = get_ml_extractor()
        logger.info("✅ ML Extractor initialized")

        # Sentinel check: a loaded location classifier implies the field
        # models were trained and deserialized successfully.
        models_trained = bool(
            extractor.field_models
            and extractor.field_models.get("location_classifier") is not None
        )
        if models_trained:
            logger.info("✅ ML field models loaded")
        else:
            logger.warning("⚠️ ML field models not trained - limited accuracy")
            logger.warning(" Run: python app/ml/training/train_complete_model.py")

        # Currency-manager smoke test; failure here must not block startup.
        try:
            currency, _country, _city, _conf = await extractor.currency_mgr.get_currency_for_location("Lagos")
            if currency:
                logger.info(f"✅ Currency Manager working (Lagos → {currency})")
        except Exception as exc:
            logger.warning(f"⚠️ Currency Manager test failed: {exc}")

        # Embedding model availability.
        if extractor.embedder is None:
            logger.warning("⚠️ Sentence embedder not available")
        else:
            logger.info("✅ Sentence embedder ready")

        logger.info("✅ All ML checks passed")
        return True

    except Exception as exc:
        logger.error("❌ ML Extractor initialization failed", exc_info=exc)
        logger.warning("⚠️ Continuing without ML features (degraded mode)")
        return False
78
 
79
  # ====================================================================
80
+ # Lifespan – non-blocking external services
81
  # ====================================================================
82
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: wire up external services on startup and
    release them on shutdown.

    MongoDB is mandatory — startup aborts (re-raises) if it is unreachable.
    Redis, Qdrant and the ML extractor are best-effort: failures are logged
    and the app continues in degraded mode.
    """
    logger.info("🚀 Starting Lojiz Platform + Aida AI with ML Integration")

    # 1. MongoDB – critical, must succeed
    try:
        await connect_db()
        await ensure_auth_indexes()
        await ensure_listing_indexes()
        logger.info("✅ MongoDB connected & indexed")
    except Exception as e:
        logger.critical("❌ MongoDB unavailable – aborting start", exc_info=e)
        raise

    # 2. Redis – optional at boot
    try:
        await redis_client.ping()
        logger.info("✅ Redis connected")
    except Exception as e:
        logger.warning("⚠️ Redis unreachable at start-up (ok for now)", exc_info=e)

    # 3. Qdrant – optional at boot
    try:
        await qdrant_client.get_collections()
        logger.info("✅ Qdrant connected")
    except Exception as e:
        logger.warning("⚠️ Qdrant unreachable at start-up (ok for now)", exc_info=e)

    # 4. ML Extractor – optional but recommended
    try:
        ml_ready = await validate_ml_startup()
        if not ml_ready:
            logger.warning("⚠️ Running in degraded mode without ML features")
    except Exception as e:
        logger.error("❌ ML validation failed", exc_info=e)
        logger.warning("⚠️ Continuing without ML features")

    yield

    logger.info("🛑 Shutting down Lojiz Platform")
    try:
        # Clear ML caches (best-effort).
        # FIX: was a bare `except:`, which also swallows KeyboardInterrupt /
        # SystemExit during shutdown; narrowed to Exception.
        try:
            ml = get_ml_extractor()
            ml.currency_mgr.clear_cache()
            logger.info("✅ ML caches cleared")
        except Exception:
            pass

        await disconnect_db()
        await redis_client.close()
        logger.info("✅ Cleanup complete")
    except Exception as e:
        logger.warning("⚠️ Shutdown warning", exc_info=e)
136
 
137
  # ====================================================================
138
  # FastAPI instance
 
145
  )
146
 
147
  # ====================================================================
148
+ # CORS
 
 
 
 
 
 
149
  # ====================================================================
150
  environment = os.getenv("ENVIRONMENT", "development")
151
  is_production = environment == "production"
 
162
  "http://127.0.0.1:3000",
163
  "http://127.0.0.1:5000",
164
  "http://127.0.0.1:8080",
165
+ "http://127.0.0.1:56205",
166
  ]
167
 
168
  app.add_middleware(
 
175
  max_age=600,
176
  )
177
 
 
 
 
178
  # ====================================================================
179
  # Exception handlers
180
  # ====================================================================
 
222
  app.include_router(auth.router, prefix="/api/auth", tags=["Authentication"])
223
  app.include_router(ai_chat_router, prefix="/ai", tags=["Aida AI"])
224
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225
  # ====================================================================
226
  # Health
227
  # ====================================================================
228
@app.get("/health", tags=["Health"])
async def health_check():
    """Health check endpoint with ML status"""
    # ML readiness: the location classifier is used as the sentinel model —
    # if it is loaded, the ML stack is considered usable.
    # FIX: was a bare `except:` (also catches SystemExit/KeyboardInterrupt);
    # narrowed to Exception.
    try:
        ml = get_ml_extractor()
        ml_ready = ml.field_models.get("location_classifier") is not None if ml.field_models else False
    except Exception:
        ml_ready = False

    return {
        "status": "ok",
        "service": "Lojiz Platform + Aida AI",
        "version": "1.0.0",
        "environment": environment,
        "ml_ready": ml_ready,  # ✅ NEW
    }
244
 
245
  @app.get("/", tags=["Root"])
 
248
  "message": "Welcome to Lojiz Platform + Aida AI",
249
  "docs": "/docs",
250
  "health": "/health",
 
251
  "environment": environment,
252
  }
253
 
 
255
  async def options_handler(full_path: str):
256
  return JSONResponse(status_code=200, content={})
257
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
  # ====================================================================
259
  # Run: uvicorn app.main:app --reload
260
  # ====================================================================