destinyebuka committed on
Commit
a8ed4d6
·
1 Parent(s): 239f45c
app/__pycache__/config.cpython-313.pyc ADDED
Binary file (5.07 kB)

app/__pycache__/database.cpython-313.pyc ADDED
Binary file (4.84 kB)

app/ai/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (180 Bytes)

app/ai/__pycache__/config.cpython-313.pyc ADDED
Binary file (2.34 kB)

app/ai/__pycache__/graph.cpython-313.pyc ADDED
Binary file (4.45 kB)

app/ai/__pycache__/service.cpython-313.pyc ADDED
Binary file (2.55 kB)

app/ai/__pycache__/state.cpython-313.pyc ADDED
Binary file (4.54 kB)
 
app/ai/graph.py CHANGED
@@ -9,6 +9,7 @@ from app.ai.nodes.publish_node import publish_node
  from app.ai.nodes.edit_node import edit_node
  from app.ai.nodes.discard_node import discard_node
  from structlog import get_logger

  logger = get_logger(__name__)

@@ -22,61 +23,103 @@ workflow.add_node("create_draft", draft_node)
  workflow.add_node("handle_publish", publish_node)
  workflow.add_node("handle_edit", edit_node)
  workflow.add_node("handle_discard", discard_node)

- # ---------- EDGES ----------
- workflow.add_edge(START, "parse_intent")
-
- def route_from_intent(state: ChatState):
-     intent = state.get("intent")
-     if intent in {"search", "list"}:
-         return "check_permissions"
-     return END
- workflow.add_conditional_edges("parse_intent", route_from_intent,
-                                {"check_permissions": "check_permissions", END: END})
-
- def route_from_permissions(state: ChatState):
-     if not state.get("allowed", False):
-         return END
-     return "search_listings" if state.get("intent") == "search" else "create_draft"
- workflow.add_conditional_edges("check_permissions", route_from_permissions,
-                                {"search_listings": "search_listings",
-                                 "create_draft": "create_draft", END: END})
-
- workflow.add_edge("search_listings", END)
-
- # ---------- POST-DRAFT ROUTING (language-agnostic) ----------
  _KEYWORDS_PUBLISH = {"publish", "publier", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
- _KEYWORDS_EDIT = {"edit", "modifier", "change", "update", "correction", "fix"}
  _KEYWORDS_DISCARD = {"discard", "delete", "cancel", "annuler", "remove", "start over"}

  def route_after_draft(state: ChatState):
      last = state["messages"][-1]["content"].lower()
      if any(k in last for k in _KEYWORDS_PUBLISH):
          return "handle_publish"
      if any(k in last for k in _KEYWORDS_EDIT):
          return "handle_edit"
      if any(k in last for k in _KEYWORDS_DISCARD):
          return "handle_discard"
-     return "parse_intent"  # fall back to normal intent

- def gate_before_intent(state: ChatState):
-     # if draft is on screen, go straight to post-draft router
-     if state.get("status") == "preview_shown":
-         return "route_after_draft"
-     return "parse_intent"

- workflow.add_conditional_edges(START, gate_before_intent,
-                                {"route_after_draft": "route_after_draft",
-                                 "parse_intent": "parse_intent"})
- workflow.add_conditional_edges("route_after_draft", route_after_draft,
-                                {"handle_publish": "handle_publish",
-                                 "handle_edit": "handle_edit",
-                                 "handle_discard": "handle_discard",
-                                 "parse_intent": "parse_intent"})

  workflow.add_edge("handle_publish", END)
- workflow.add_edge("handle_edit", END)
  workflow.add_edge("handle_discard", END)

- agent = workflow.compile()
- logger.info("✅ LangGraph compiled – post-draft routing active")

  from app.ai.nodes.edit_node import edit_node
  from app.ai.nodes.discard_node import discard_node
  from structlog import get_logger
+ from typing import Dict

  logger = get_logger(__name__)

  workflow.add_node("handle_publish", publish_node)
  workflow.add_node("handle_edit", edit_node)
  workflow.add_node("handle_discard", discard_node)
+ workflow.add_node("route_after_draft", lambda state: state)  # Dummy node for routing

+ # ---------- KEYWORDS FOR ROUTING ----------
  _KEYWORDS_PUBLISH = {"publish", "publier", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
+ _KEYWORDS_EDIT = {"edit", "change", "modify", "correction", "fix"}
  _KEYWORDS_DISCARD = {"discard", "delete", "cancel", "annuler", "remove", "start over"}

+ # ---------- ROUTERS / GATES ----------
+
+ def gate_before_intent(state: ChatState):
+     """Route from START: Check if a draft preview is shown."""
+     # If draft is on screen (preview_shown or waiting_for_images), go straight to post-draft router
+     if state.get("status") in {"preview_shown", "waiting_for_images", "editing"}:
+         # In a real app, 'editing' status might route to a special node, but for now,
+         # any status related to an active draft should go to the draft router.
+         return "route_after_draft"
+     return "parse_intent"
+
  def route_after_draft(state: ChatState):
+     """Route after draft preview based on user's command (publish/edit/discard)."""
      last = state["messages"][-1]["content"].lower()
+
+     # Check for keywords that trigger final actions
      if any(k in last for k in _KEYWORDS_PUBLISH):
          return "handle_publish"
      if any(k in last for k in _KEYWORDS_EDIT):
          return "handle_edit"
      if any(k in last for k in _KEYWORDS_DISCARD):
          return "handle_discard"
+
+     # If the status is 'editing', any user input is the new value for the field.
+     # We should go straight to the draft node to process it.
+     if state.get("status") == "editing":
+         return "create_draft"
+
+     return "parse_intent"  # fall back to normal intent for casual chat

+ def route_after_permissions(state: ChatState) -> str:
+     """Route after permissions check based on intent/allowed status."""
+     if state.get("intent") == "search":
+         return "search_listings"
+
+     # If allowed (or we need to collect more data for a list)
+     if state.get("allowed", True) and state.get("intent") == "list":
+         return "create_draft"
+
+     # Blocked by role gate (user is not allowed to list this type)
+     return END
+
+ # ---------- EDGES ----------
+
+ # 1. Initial Entry: Check if a post-draft action is expected
+ workflow.add_conditional_edges(
+     START,
+     gate_before_intent,
+     {"route_after_draft": "route_after_draft", "parse_intent": "parse_intent"}
+ )

+ # 2. Intent detection -> Permission check
+ workflow.add_edge("parse_intent", "check_permissions")

+ # 3. Permission Check -> Route to Listing or Search, or END
+ workflow.add_conditional_edges(
+     "check_permissions",
+     route_after_permissions,
+     {"search_listings": "search_listings", "create_draft": "create_draft", END: END}
+ )
+
+ # 4. Listing Collection Flow: after create_draft runs, re-parse the user's next message;
+ # the route_after_draft gate takes over once a preview is on screen.
+ workflow.add_edge("create_draft", "parse_intent")  # After draft, always re-parse the user's new message
+
+ # 5. Post-Draft Actions Routing (triggered when status is 'preview_shown')
+ workflow.add_conditional_edges(
+     "route_after_draft",
+     route_after_draft,
+     {
+         "handle_publish": "handle_publish",
+         "handle_edit": "handle_edit",
+         "handle_discard": "handle_discard",
+         "parse_intent": "parse_intent",
+         "create_draft": "create_draft",  # Needed for when status is 'editing'
+     }
+ )
+
+ # 6. Final States: Publish/Discard/Search ends the current turn
  workflow.add_edge("handle_publish", END)
  workflow.add_edge("handle_discard", END)
+ workflow.add_edge("search_listings", END)
+
+ # 7. Edit Action: Go back to `create_draft` to process the change and re-show the preview
+ workflow.add_edge("handle_edit", "create_draft")

+ # Compile the graph
+ agent = workflow.compile()
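
Aside: a minimal, self-contained sketch of what the new post-draft routing does, using a plain dict in place of ChatState. The keyword sets and node names are copied from the hunk above; the compiled agent is not needed to trace the decision.

_KEYWORDS_PUBLISH = {"publish", "publier", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
_KEYWORDS_EDIT = {"edit", "change", "modify", "correction", "fix"}
_KEYWORDS_DISCARD = {"discard", "delete", "cancel", "annuler", "remove", "start over"}

def route_after_draft(state: dict) -> str:
    # Same decision order as graph.py: publish > edit > discard > editing > re-parse
    last = state["messages"][-1]["content"].lower()
    if any(k in last for k in _KEYWORDS_PUBLISH):
        return "handle_publish"
    if any(k in last for k in _KEYWORDS_EDIT):
        return "handle_edit"
    if any(k in last for k in _KEYWORDS_DISCARD):
        return "handle_discard"
    if state.get("status") == "editing":
        return "create_draft"
    return "parse_intent"

for text in ("publish it now", "please change the price", "cancel everything", "hello"):
    print(text, "->", route_after_draft({"messages": [{"content": text}], "status": "preview_shown"}))
# publish it now -> handle_publish, please change the price -> handle_edit,
# cancel everything -> handle_discard, hello -> parse_intent

Note that short tokens such as "ok" and "yes" in the publish set also match longer words ("okay", "yesterday"), since the check is a plain substring test.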
app/ai/memory/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (187 Bytes)

app/ai/memory/__pycache__/redis_memory.cpython-313.pyc ADDED
Binary file (2.63 kB)

app/ai/nodes/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (186 Bytes)

app/ai/nodes/__pycache__/discard_node.cpython-313.pyc ADDED
Binary file (983 Bytes)

app/ai/nodes/__pycache__/draft_node.cpython-313.pyc ADDED
Binary file (10.1 kB)

app/ai/nodes/__pycache__/edit_node.cpython-313.pyc ADDED
Binary file (2.07 kB)

app/ai/nodes/__pycache__/intent_node.cpython-313.pyc ADDED
Binary file (15.1 kB)

app/ai/nodes/__pycache__/publish_node.cpython-313.pyc ADDED
Binary file (2.81 kB)

app/ai/nodes/__pycache__/role_gate_node.cpython-313.pyc ADDED
Binary file (2.06 kB)

app/ai/nodes/__pycache__/search_node.cpython-313.pyc ADDED
Binary file (14.4 kB)
 
app/ai/nodes/discard_node.py CHANGED
@@ -10,6 +10,7 @@ async def discard_node(state: Dict) -> Dict:
              "price", "listing_type", "price_type", "amenities", "requirements",
              "missing_fields", "editing_field"]:
      state.pop(key, None)
+
      state.update({
          "status": "discarded",
          "ai_reply": "Brouillon supprimé. Que souhaitez-vous faire ensuite ?",
app/ai/nodes/draft_node.py CHANGED
@@ -1,34 +1,22 @@
1
- # app/ai/nodes/draft_node.py – COMPLETE FILE (final)
2
  import datetime
3
- from typing import Dict
 
4
  from bson import ObjectId
5
- from app.database import get_db
6
  from app.ai.state import ListingDraft
7
  from structlog import get_logger
8
- from openai import AsyncOpenAI
9
- from app.config import settings
10
- from app.ml.models.ml_listing_extractor import get_ml_extractor
11
 
12
  logger = get_logger(__name__)
13
 
14
- client = AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
15
  ml_extractor = get_ml_extractor()
16
 
17
- # ========== AMENITY ICONS ==========
18
  AMENITY_ICONS = {
19
- "wifi": "πŸ“Ά",
20
- "parking": "πŸ…ΏοΈ",
21
- "furnished": "πŸ›‹οΈ",
22
- "washing machine": "🧼",
23
- "washing": "🧼",
24
- "dryer": "πŸ”₯",
25
- "balcony": "πŸ–οΈ",
26
- "pool": "🏊",
27
- "gym": "πŸ’ͺ",
28
- "garden": "🌿",
29
- "air conditioning": "❄️",
30
- "ac": "❄️",
31
- "kitchen": "🍳",
32
  }
33
 
34
  def _add_amenity_icons(amenities: list) -> str:
@@ -38,155 +26,151 @@ def _add_amenity_icons(amenities: list) -> str:
38
  for amenity in amenities:
39
  amenity_lower = amenity.lower().strip()
40
  icon = AMENITY_ICONS.get(amenity_lower, "βœ“")
41
- icons_text.append(f"{icon} {amenity.title()}")
42
  return " | ".join(icons_text)
43
 
44
- def _generate_title(state: Dict) -> str:
45
- bedrooms = state.get("bedrooms", "")
46
- location = state.get("location", "").title()
47
- listing_type = state.get("listing_type", "").title()
48
- if bedrooms and location:
49
- return f"{bedrooms}-Bedroom {listing_type} in {location}"
50
- return f"Property in {location}"
51
-
52
- def _generate_description(state: Dict) -> str:
53
- bedrooms = state.get("bedrooms", "")
54
- bathrooms = state.get("bathrooms", "")
55
- location = state.get("location", "").title()
56
- amenities = state.get("amenities", [])
57
- price = state.get("price", "")
58
- price_type = state.get("price_type", "").title()
59
- listing_type = state.get("listing_type", "").title()
60
- requirements = state.get("requirements", "")
61
-
62
- desc = f"Spacious {bedrooms}-bedroom, {bathrooms}-bathroom {listing_type} "
63
- desc += f"located in {location}. "
64
-
65
- if price:
66
- desc += f"Priced at {price:,} {price_type}. "
67
-
68
- if amenities:
69
- desc += f"Fully furnished with modern amenities including {', '.join(amenities)}. "
70
-
71
- if requirements:
72
- desc += f"Requirements: {requirements}. "
73
-
74
- desc += "Perfect for large families or shared accommodation."
75
- return desc
76
-
77
- # ========== MAIN NODE ==========
78
- async def draft_node(state: Dict) -> Dict:
79
- """
80
- LangGraph node:
81
- - Check if status == "checking_optional" (amenities/requirements missing)
82
- - Ask for amenities and requirements
83
- - Move to "draft_ready"
84
- - Or status == "draft_ready" β†’ show preview
85
- """
86
-
87
- # βœ… NEW: Check if amenities/requirements are missing
88
- if state.get("status") == "checking_optional":
89
- amenities = state.get("amenities", [])
90
- requirements = state.get("requirements")
91
-
92
- missing_optional = []
93
- if not amenities:
94
- missing_optional.append("amenities")
95
- if not requirements:
96
- missing_optional.append("requirements")
97
-
98
- if missing_optional:
99
- state["status"] = "collecting_optional"
100
- state["missing_fields"] = missing_optional
101
-
102
- questions = []
103
- if "amenities" in missing_optional:
104
- questions.append("Any amenities? (e.g., wifi, parking, balcony, pool, furnished, kitchen, dryer, garden, etc.)")
105
- if "requirements" in missing_optional:
106
- questions.append("Any special requirements for renters?")
107
-
108
- state["ai_reply"] = "Just a couple more things...\n\n" + "\n".join([f"β€’ {q}" for q in questions])
109
- logger.info("ℹ️ Asking for optional fields", missing=missing_optional)
110
  return state
111
- else:
112
- # All optional fields provided, move to draft
113
- state["status"] = "draft_ready"
114
-
115
- # Only process if listing creation with all fields ready
116
- if state.get("intent") != "list" or state.get("status") != "draft_ready":
117
- return state
118
-
119
- user_id = state.get("user_id")
120
-
121
- # βœ… ML VALIDATION before drafting
122
- try:
123
- validation = ml_extractor.validate_all_fields(state, user_id)
124
 
125
- if not validation["all_valid"]:
126
- issues_text = "\n".join([f"❌ {issue}" for issue in validation["issues"]])
127
- state["ai_reply"] = f"""I found some issues with your listing:
128
 
129
- {issues_text}
130
-
131
- Let me ask again - could you clarify these fields?"""
132
 
133
- state["status"] = "collecting"
134
- state["missing_fields"] = [
135
- field for field, result in validation["field_validations"].items()
136
- if not result["is_valid"]
137
- ]
138
 
139
- logger.warning("🚫 Fields failed ML validation", issues=validation["issues"])
140
- return state
 
141
 
142
- logger.info("βœ… All fields passed ML validation", user_id=user_id)
143
 
144
- except Exception as e:
145
- logger.error("❌ ML validation error", exc_info=e)
146
- state["ai_reply"] = "Sorry, I couldn't validate your listing. Please try again."
147
- state["status"] = "error"
148
- return state
149
 
150
- # Generate title and description
151
- title = _generate_title(state)
152
- description = _generate_description(state)
153
- amenities_with_icons = _add_amenity_icons(state.get("amenities", []))
154
 
155
- # βœ… Get images from state (if any were uploaded)
156
- images = state.get("draft", {}).get("images", []) if isinstance(state.get("draft"), dict) else []
 
 
 
157
 
158
- # Build draft preview with all fields including images and icons
159
  draft_preview = {
160
- "title": title,
161
- "description": description,
162
- "location": state.get("location", "").title(),
163
- "bedrooms": state.get("bedrooms"),
164
- "bathrooms": state.get("bathrooms"),
165
- "price": state.get("price"),
166
- "price_type": state.get("price_type"),
167
- "listing_type": state.get("listing_type"),
168
- "amenities": state.get("amenities", []),
169
- "amenities_with_icons": amenities_with_icons,
170
- "requirements": state.get("requirements"),
171
- "currency": state.get("currency", "XOF"),
172
- "images": images,
173
- "field_confidences": validation["field_validations"],
174
  }
175
 
176
- logger.info("🎯 Draft preview generated",
177
- title=title,
178
- location=state.get("location"),
179
- image_count=len(images),
180
- amenities=state.get("amenities", []))
181
-
182
- # Build nice preview message for user
183
- images_section = ""
184
- if images:
185
- images_section = f"\nπŸ“· Images: {len(images)} uploaded\n"
186
- for idx, img_url in enumerate(images[:3], 1):
187
- images_section += f" {idx}. {img_url[:60]}...\n"
188
- if len(images) > 3:
189
- images_section += f" ... and {len(images) - 3} more\n"
190
 
191
  preview_text = f"""
192
  β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
@@ -198,7 +182,7 @@ Let me ask again - could you clarify these fields?"""
  📍 Location: {draft_preview['location']}
  🛏️ Bedrooms: {draft_preview['bedrooms']}
  🚿 Bathrooms: {draft_preview['bathrooms']}
- 💰 Price: {draft_preview['price']:,} {draft_preview['price_type']} ({draft_preview['currency']})

  {draft_preview['description']}

@@ -207,25 +191,24 @@ Let me ask again - could you clarify these fields?"""
  └────────────────────────────────────────────────────────────────────────┘
  """

- # ✅ Check if images uploaded, ask if not
  if not images:
  preview_text += """
- 📸 Upload property images to make your listing more attractive!

- Then say **publish** to make it live!
  """
- state["status"] = "waiting_for_images" # ✅ Wait for images
  else:
  preview_text += """
- ✅ Perfect! Say **publish** to make your listing live!
  """
- state["status"] = "preview_shown" # ✅ Ready to publish

  state["draft_preview"] = draft_preview
  state["ai_reply"] = preview_text

- # ------------------------------------------------------------------
- # FINAL FLAG – triggers the gate in graph.py
- # ------------------------------------------------------------------
- state["status"] = "preview_shown"
  return state
 
1
+ # app/ai/nodes/draft_node.py – COMPLETE FILE (final with editing logic)
2
  import datetime
3
+ import json
4
+ from typing import Dict, List
5
  from bson import ObjectId
 
6
  from app.ai.state import ListingDraft
7
  from structlog import get_logger
8
+ from app.ml.models.ml_listing_extractor import get_ml_extractor # Assuming this is available
9
+ from app.ai.nodes.intent_node import get_llm_chain, _clean_json # Import helper functions
 
10
 
11
  logger = get_logger(__name__)
12
 
 
13
  ml_extractor = get_ml_extractor()
14
 
15
+ # ========== AMENITY ICONS (Keep as is) ==========
  AMENITY_ICONS = {
+ "wifi": "📶", "parking": "🅿️", "furnished": "🛋️", "washing machine": "🧼",
+ "washing": "🧼", "dryer": "🔥", "balcony": "🏖️", "pool": "🏊", "gym": "💪",
+ "garden": "🌿", "air conditioning": "❄️", "ac": "❄️", "kitchen": "🍳",
20
  }
21
 
22
  def _add_amenity_icons(amenities: list) -> str:
 
26
  for amenity in amenities:
27
  amenity_lower = amenity.lower().strip()
28
  icon = AMENITY_ICONS.get(amenity_lower, "βœ“")
29
+ icons_text.append(f"{icon} {amenity}")
30
  return " | ".join(icons_text)
31
 
32
+ def _generate_title(draft: Dict) -> str:
33
+ """Simple title generation based on key fields."""
34
+ bds = draft.get("bedrooms", "N/A")
35
+ loc = draft.get("location", "N/A")
36
+ price = f"{draft.get('price', 0):,.0f}"
37
+ p_type = draft.get("price_type", "monthly").capitalize()
38
+
39
+ return f"{bds}-Bed {draft['listing_type'].capitalize()} in {loc} | {price} {p_type}"
40
+
41
+
42
+ def _update_draft_fields(state: Dict) -> Dict:
43
+ """Updates the draft object with all current fields from state."""
44
+ # The fields should already be updated in the state by the intent_node
45
+ # but we consolidate them here for clarity and ML checks.
46
+
47
+ # 1. Handle Editing Mode
48
+ if state.get("status") == "editing" and state.get("editing_field"):
49
+ field = state["editing_field"]
50
+ last_message = state["messages"][-1]["content"]
51
+
52
+ # Use ML extractor to infer the new value from the user's message
53
+ # We try to re-run the extraction on the single user message for better parsing
54
+ try:
55
+ extracted = ml_extractor.extract_fields(last_message, state.get("user_id"))
56
+ new_value = extracted.get(field)
57
+
58
+ if new_value is not None:
59
+ state[field] = new_value
60
+ state["editing_field"] = None # Clear editing field
61
+ state["status"] = "collecting" # Go back to collecting/validating
62
+ logger.info(f"βœ… Field updated by edit", field=field, value=new_value)
63
+ else:
64
+ # If ML extractor fails to find the field in the simple message,
65
+ # keep editing status and ask again.
66
+ state["ai_reply"] = f"Désolé, je n'ai pas compris la nouvelle valeur pour **{field}**. Pouvez-vous l'indiquer clairement ?"
67
+ logger.warning(f"⚠️ Failed to update field, asking again", field=field)
68
+ return state
69
+
70
+ except Exception as e:
71
+ logger.error("❌ ML Extraction failed during edit", exc_info=e)
72
+ state["ai_reply"] = f"Une erreur s'est produite lors de la mise Γ  jour de **{field}**. Veuillez rΓ©essayer plus tard."
73
+ state["editing_field"] = None
74
+ state["status"] = "collecting"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  return state
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
 
 
 
77
 
78
+ # 2. Build current draft (using all state fields)
79
+ draft_dict = {
80
+ "owner_id": state["user_id"],
81
+ "listing_type": state.get("listing_type"),
82
+ "location": state.get("location"),
83
+ "bedrooms": state.get("bedrooms"),
84
+ "bathrooms": state.get("bathrooms"),
85
+ "price": state.get("price"),
86
+ "price_type": state.get("price_type"),
87
+ "currency": state.get("currency", "XOF"),
88
+ "amenities": state.get("amenities", []),
89
+ "requirements": state.get("requirements"),
90
+ "images": state.get("draft_preview", {}).get("images", []), # Preserve images if exists
91
+ }
92
+
93
+ # Use ML to fill in or validate missing Title and Description
94
+ if draft_dict.get("location") and draft_dict.get("price"):
95
+ # We need a placeholder title/description for validation and preview
96
+ draft_dict["title"] = _generate_title(draft_dict)
97
+ draft_dict["description"] = "A lovely property located in the heart of the city."
98
+
99
+ # In a real setup, you would call the LLM again here to generate a high-quality
100
+ # Title and Description based on all collected data.
101
+ # e.g., draft_dict["title"], draft_dict["description"] = await _generate_content_with_llm(draft_dict)
102
+
103
+ state["draft"] = ListingDraft.parse_obj(draft_dict)
104
+
105
+ return state
106
+
107
+ # ------------------------------------------------------------------
108
+ # MAIN NODE FUNCTION
109
+ # ------------------------------------------------------------------
110
 
111
+ async def draft_node(state: Dict) -> Dict:
112
+ logger.info("πŸ”¨ Entering draft_node", status=state.get("status"), editing_field=state.get("editing_field"))
113
+
114
+ # 1. Update the draft object from state fields, applying edits if needed
115
+ state = _update_draft_fields(state)
116
+
117
+ # Check if a specific reply was set during editing or validation
118
+ if state.get("ai_reply") and state["status"] == "editing":
119
+ return state # The edit process set a question, return now.
120
+
121
+ # 2. Re-extract missing fields for the next step
122
+ required_fields = ["location", "bedrooms", "bathrooms", "price", "listing_type", "price_type"]
123
+ missing_fields = [f for f in required_fields if not state.get(f)]
124
+ state["missing_fields"] = missing_fields
125
+
126
+ # 3. Determine Next Action
127
+ if missing_fields:
128
+ # Ask for the next required field
129
+ next_field = missing_fields[0]
130
+ questions = {
131
+ "location": "Great! What city is the property located in?",
132
+ "bedrooms": "How many bedrooms does it have?",
133
+ "bathrooms": "And how many bathrooms?",
134
+ "price": "What is the price, and is that monthly, nightly, or yearly?",
135
+ "listing_type": "Is this for rent, short-stay, or sale?",
136
+ "price_type": "Please clarify the price type (e.g., monthly, nightly).",
137
+ }
138
+ state["ai_reply"] = questions.get(next_field, "I need a little more information to create your listing.")
139
+ state["status"] = "collecting"
140
+ logger.info("❓ Asking for missing field", field=next_field)
141
 
142
+ else:
143
+ # All required fields are present: generate preview
144
+ return await _generate_preview_and_ask_publish(state)
145
 
146
+ return state
147
 
 
 
 
 
 
148
 
149
+ # ------------------------------------------------------------------
150
+ # PREVIEW GENERATION
151
+ # ------------------------------------------------------------------
 
152
 
153
+ async def _generate_preview_and_ask_publish(state: Dict) -> Dict:
154
+ """Generates the final human-readable preview."""
155
+
156
+ draft = state["draft"].dict() # Use the consolidated draft model
157
+ images = state.get("draft_preview", {}).get("images", []) # Get images from previous state
158
 
159
+ # Create the human-readable preview
160
  draft_preview = {
161
+ 'title': draft.get('title', _generate_title(draft)),
162
+ 'location': draft['location'],
163
+ 'bedrooms': draft['bedrooms'],
164
+ 'bathrooms': draft['bathrooms'],
165
+ 'price': draft['price'],
166
+ 'price_type': draft['price_type'],
167
+ 'currency': draft['currency'],
168
+ 'description': draft.get('description', 'No description yet.'),
169
+ 'amenities_with_icons': _add_amenity_icons(draft.get('amenities', [])),
170
+ 'images': images,
 
 
 
 
171
  }
172
 
173
+ images_section = f"\n🖼️ Images uploaded: {len(images)}" if images else "❌ No images uploaded."

  preview_text = f"""
  ┌────────────────────────────────────────────────────────────────────────┐
  📍 Location: {draft_preview['location']}
  🛏️ Bedrooms: {draft_preview['bedrooms']}
  🚿 Bathrooms: {draft_preview['bathrooms']}
+ 💰 Price: {draft_preview['price']:,.0f} {draft_preview['price_type']} ({draft_preview['currency']})

  {draft_preview['description']}

  └────────────────────────────────────────────────────────────────────────┘
  """

+ # Ask for final action
  if not images:
  preview_text += """
+ 📸 **Upload property images** to make your listing more attractive! (Or say 'publish' now)

+ You can also say **'edit the price'** or **'discard'** to start over.
  """
+ state["status"] = "waiting_for_images"
  else:
  preview_text += """
+ ✅ Perfect! You can now say **'publish'** to make your listing live!
+
+ You can also say **'edit the location'** or **'discard'** to start over.
  """
+ state["status"] = "preview_shown"

  state["draft_preview"] = draft_preview
  state["ai_reply"] = preview_text

+ logger.info("📒 Draft preview generated", status=state["status"])
  return state
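
For reference, a small standalone sketch of how the title and amenity-icon helpers above format a draft, with a trimmed icon map and a plain dict in place of the full state:

AMENITY_ICONS = {"wifi": "📶", "parking": "🅿️", "pool": "🏊"}  # subset of the map in draft_node.py

def add_amenity_icons(amenities):
    # Unknown amenities fall back to a plain check mark, as in _add_amenity_icons
    return " | ".join(f"{AMENITY_ICONS.get(a.lower().strip(), '✓')} {a}" for a in amenities)

def generate_title(draft):
    price = f"{draft.get('price', 0):,.0f}"
    return (f"{draft.get('bedrooms', 'N/A')}-Bed {draft['listing_type'].capitalize()} "
            f"in {draft.get('location', 'N/A')} | {price} {draft.get('price_type', 'monthly').capitalize()}")

draft = {"listing_type": "rent", "location": "Cotonou", "bedrooms": 2, "price": 50000, "price_type": "monthly"}
print(generate_title(draft))                   # 2-Bed Rent in Cotonou | 50,000 Monthly
print(add_amenity_icons(["wifi", "garden"]))   # 📶 wifi | ✓ garden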
app/ai/nodes/edit_node.py CHANGED
@@ -10,7 +10,7 @@ _KEYWORDS_MAP = {
      "bedrooms": {"bedroom", "chambre", "piece", "pièce"},
      "bathrooms": {"bathroom", "salle de bain", "toilet", "douche"},
      "amenities": {"amenity", "equipment", "équipement", "wifi", "parking"},
-     "requirements":{"requirement", "condition", "exigence"},
+     "requirements": {"requirement", "condition", "exigence"},
  }

  def _detect_field_to_edit(text: str) -> str | None:
@@ -30,8 +30,7 @@ async def edit_node(state: Dict) -> Dict:
          state["ai_reply"] = f"Quelle est la nouvelle valeur pour **{field}** ?"
      else:
          state["status"] = "editing"
-         state["ai_reply"] = ("Que souhaitez-vous modifier ? \n"
-                              "– location \n– price \n– bedrooms \n– bathrooms \n"
-                              "– amenities \n– requirements")
-     logger.info("📝 Edit requested", field=field)
+         state["ai_reply"] = "Quel champ souhaitez-vous modifier ? (par ex. 'price', 'location', etc.)"
+
+     logger.info("✏️ Draft edit requested", field=field)
      return state
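
A rough sketch of the keyword lookup that `_detect_field_to_edit` appears to perform. Only the bedrooms/bathrooms/amenities/requirements entries are visible in the hunk above, so the price and location keyword sets here are illustrative guesses:

_KEYWORDS_MAP = {
    "price": {"price", "prix", "loyer"},             # hypothetical entry for illustration
    "location": {"location", "ville", "quartier"},   # hypothetical entry for illustration
    "bedrooms": {"bedroom", "chambre", "piece", "pièce"},
}

def detect_field_to_edit(text: str):
    # First field whose keyword appears as a substring wins
    text = text.lower()
    for field, keywords in _KEYWORDS_MAP.items():
        if any(k in text for k in keywords):
            return field
    return None

print(detect_field_to_edit("je veux changer le prix"))    # price
print(detect_field_to_edit("change the bedroom count"))   # bedrooms
print(detect_field_to_edit("hmm"))                        # None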
app/ai/nodes/intent_node.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/ai/nodes/intent_node.py – COMPLETE FILE
2
  import json, re, os
3
  from typing import Dict, List
4
  from tenacity import retry, stop_after_attempt, wait_exponential
@@ -41,15 +41,13 @@ def _get_next_question(missing: List[str]) -> str | None:
41
  # ---------- system prompt ----------
42
  _SYSTEM_PROMPT_PATH = os.path.join(os.path.dirname(__file__), "../prompts/system_prompt.txt")
43
  with open(_SYSTEM_PROMPT_PATH, encoding="utf-8") as f:
44
- SYSTEM_PROMPT = f.read()
45
 
46
  # ---------- dynamic example ----------
47
- async def _generate_complete_example_in_user_language(human_msg: str) -> str:
48
  prompt = f"""
49
  You are Aida, Lojiz's real-estate assistant. The user wants to list a property.
50
- Generate a SHORT property listing example in the EXACT SAME LANGUAGE as this message: "{human_msg}"
51
-
52
- IMPORTANT: respond ENTIRELY in that language. Every word must be in the user's language.
53
  The example must briefly contain:
54
  - location (city/area)
55
  - bedrooms & bathrooms
@@ -62,7 +60,7 @@ Keep it SHORT – 2 sentences, conversational.
62
  Use a DIFFERENT, natural introductory phrase each time (vary it).
63
 
64
  Output format (nothing else):
65
- [Natural intro in user's language]
66
 
67
  πŸ“ **Example:**
68
  "[2-sentence listing with all fields]"
@@ -77,9 +75,19 @@ Output format (nothing else):
77
  return resp.choices[0].message.content.strip()
78
  except Exception as e:
79
  logger.warning("⚠️ Example generation failed", exc_info=e)
80
- # ultra-short fallback – still in user’s language (model will detect)
81
  return "📝 **Example:** \n\"Bel appartement 2 chambres, 1 salle de bain à Cotonou, avec wifi et parking. 50 000 F CFA par mois, 1 mois de caution requis.\""
83
  # ---------- main node ----------
84
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
85
  async def intent_node(state: Dict) -> Dict:
@@ -87,7 +95,40 @@ async def intent_node(state: Dict) -> Dict:
87
  user_role = state["user_role"]
88
  human_msg = state["messages"][-1]["content"]
89
 
90
- # ===== 1. HANDLE ON-GOING EDIT (language agnostic) =====
91
  if state.get("status") == "editing" and state.get("editing_field"):
92
  field = state.pop("editing_field")
93
  new_raw = human_msg
@@ -107,12 +148,14 @@ async def intent_node(state: Dict) -> Dict:
107
  state["requirements"] = new_raw.strip()
108
 
109
  state["status"] = "draft_ready"
110
- state["ai_reply"] = "βœ… Mis Γ  jour ! Voici la nouvelle prΓ©-visualisation :" # LLM will override with user language
111
  return state
112
  # ============================================================
113
 
 
 
114
  messages = [
115
- {"role": "system", "content": SYSTEM_PROMPT.replace("{user_role}", user_role)},
116
  {"role": "user", "content": human_msg},
117
  ]
118
  logger.info("πŸ€– Aida intent call", user_role=user_role, msg=human_msg[:80])
@@ -148,7 +191,7 @@ async def intent_node(state: Dict) -> Dict:
148
  "next_question": _get_next_question(missing),
149
  })
150
  if should_show_example:
151
- dynamic_example = await _generate_complete_example_in_user_language(human_msg)
152
  data["ai_reply"] = dynamic_example
153
  else:
154
  data["ai_reply"] = _get_next_question(missing)
@@ -156,7 +199,7 @@ async def intent_node(state: Dict) -> Dict:
156
  data.update({
157
  "status": "checking_optional",
158
  "missing_fields": [],
159
- "ai_reply": "Perfect! Any amenities or special requirements?",
160
  })
161
 
162
  # ---------- search ----------
 
1
+ # app/ai/nodes/intent_node.py – COMPLETE FILE (with command-lock & language-lock)
2
  import json, re, os
3
  from typing import Dict, List
4
  from tenacity import retry, stop_after_attempt, wait_exponential
 
41
  # ---------- system prompt ----------
42
  _SYSTEM_PROMPT_PATH = os.path.join(os.path.dirname(__file__), "../prompts/system_prompt.txt")
43
  with open(_SYSTEM_PROMPT_PATH, encoding="utf-8") as f:
44
+ SYSTEM_PROMPT_RAW = f.read()
45
 
46
  # ---------- dynamic example ----------
47
+ async def _generate_complete_example_in_user_language(human_msg: str, lang: str) -> str:
48
  prompt = f"""
49
  You are Aida, Lojiz's real-estate assistant. The user wants to list a property.
50
+ Generate a SHORT property listing example in {lang.upper()}.
 
 
51
  The example must briefly contain:
52
  - location (city/area)
53
  - bedrooms & bathrooms
 
60
  Use a DIFFERENT, natural introductory phrase each time (vary it).
61
 
62
  Output format (nothing else):
63
+ [Natural intro in {lang}]
64
 
65
  πŸ“ **Example:**
66
  "[2-sentence listing with all fields]"
 
75
  return resp.choices[0].message.content.strip()
76
  except Exception as e:
77
  logger.warning("⚠️ Example generation failed", exc_info=e)
 
78
  return "📝 **Example:** \n\"Bel appartement 2 chambres, 1 salle de bain à Cotonou, avec wifi et parking. 50 000 F CFA par mois, 1 mois de caution requis.\""
79
 
80
+ # ---------- language detector ----------
81
+ def _detect_language(msg: str) -> str:
82
+ msg_l = msg.lower()
83
+ if any(w in msg_l for w in {"français", "bonjour", "prix", "loyer", "salut", "appartement"}):
+ return "french"
+ if any(w in msg_l for w in {"hola", "precio", "alquiler", "casa", "habitación"}):
+ return "spanish"
+ if any(w in msg_l for w in {"ola", "preço", "aluguel", "casa", "quarto"}):
+ return "portuguese"
88
+ return "portuguese"
89
+ return "english"
90
+
91
  # ---------- main node ----------
92
  @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
93
  async def intent_node(state: Dict) -> Dict:
 
95
  user_role = state["user_role"]
96
  human_msg = state["messages"][-1]["content"]
97
 
98
+ # ===== 0. LANGUAGE LOCK (once per conversation) =====
99
+ if "user_language" not in state:
100
+ state["user_language"] = _detect_language(human_msg)
101
+ lang = state["user_language"]
102
+
103
+ # ===== 1. COMMAND CHECK (only when draft is on screen) =====
104
+ last_msg = human_msg.lower().strip()
105
+ if state.get("status") == "preview_shown":
106
+ if any(k in last_msg for k in {"publish", "publier", "confirmer", "go live", "post it", "list it", "yes", "ok", "okay"}):
107
+ state.update({
108
+ "intent": "list",
109
+ "status": "publishing",
110
+ "ai_reply": "βœ… Publishing your listing now..." if lang=="english" else "βœ… Publication en cours...",
111
+ })
112
+ logger.info("πŸ”’ Command detected: publish")
113
+ return state
114
+ if any(k in last_msg for k in {"edit", "modifier", "change", "update", "correction", "fix"}):
115
+ state.update({
116
+ "intent": "list",
117
+ "status": "editing",
118
+ "ai_reply": "What would you like to change?" if lang=="english" else "Que voulez-vous modifier ?",
119
+ })
120
+ logger.info("πŸ”’ Command detected: edit")
121
+ return state
122
+ if any(k in last_msg for k in {"discard", "delete", "cancel", "annuler", "remove", "start over"}):
123
+ state.update({
124
+ "intent": "list",
125
+ "status": "discarding",
126
+ "ai_reply": "Draft cleared. What would you like to do next?" if lang=="english" else "Brouillon supprimΓ©. Que souhaitez-vous faire ensuite ?",
127
+ })
128
+ logger.info("πŸ”’ Command detected: discard")
129
+ return state
130
+
131
+ # ===== 2. HANDLE ON-GOING EDIT (language agnostic) =====
132
  if state.get("status") == "editing" and state.get("editing_field"):
133
  field = state.pop("editing_field")
134
  new_raw = human_msg
 
148
  state["requirements"] = new_raw.strip()
149
 
150
  state["status"] = "draft_ready"
151
+ state["ai_reply"] = "✅ Updated! Here’s the new preview:" if lang=="english" else "✅ Mis à jour ! Voici la nouvelle pré-visualisation :"
152
  return state
153
  # ============================================================
154
 
155
+ # ===== 3. NORMAL INTENT PARSING =====
156
+ SYSTEM_PROMPT = SYSTEM_PROMPT_RAW.replace("{user_role}", user_role) + f"\nRespond entirely in {lang}."
157
  messages = [
158
+ {"role": "system", "content": SYSTEM_PROMPT},
159
  {"role": "user", "content": human_msg},
160
  ]
161
  logger.info("πŸ€– Aida intent call", user_role=user_role, msg=human_msg[:80])
 
191
  "next_question": _get_next_question(missing),
192
  })
193
  if should_show_example:
194
+ dynamic_example = await _generate_complete_example_in_user_language(human_msg, lang)
195
  data["ai_reply"] = dynamic_example
196
  else:
197
  data["ai_reply"] = _get_next_question(missing)
 
199
  data.update({
200
  "status": "checking_optional",
201
  "missing_fields": [],
202
+ "ai_reply": "Perfect! Any amenities or special requirements?" if lang=="english" else "Parfait ! Des équipements ou exigences particulières ?",
203
  })
204
 
205
  # ---------- search ----------
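
As a quick sanity check, the keyword-based language lock added above can be exercised on its own (function body copied from the hunk):

def detect_language(msg: str) -> str:
    msg_l = msg.lower()
    if any(w in msg_l for w in {"français", "bonjour", "prix", "loyer", "salut", "appartement"}):
        return "french"
    if any(w in msg_l for w in {"hola", "precio", "alquiler", "casa", "habitación"}):
        return "spanish"
    if any(w in msg_l for w in {"ola", "preço", "aluguel", "casa", "quarto"}):
        return "portuguese"
    return "english"

for msg in ("Bonjour, je cherche un appartement", "Hola, busco una casa", "I want to list my flat"):
    print(msg, "->", detect_language(msg))   # french, spanish, english

Because "casa" appears in both the Spanish and Portuguese sets and Spanish is tested first, a Portuguese message containing only "casa" is tagged as Spanish; the ordering of the checks is what disambiguates.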
app/ai/quota/quota_manager.py ADDED
@@ -0,0 +1,135 @@
 
1
+ # app/ai/quota/quota_manager.py - Track daily listing & search quota
2
+ import json
3
+ from datetime import datetime, timedelta
4
+ from typing import Dict, Tuple
5
+ from app.ai.config import redis_client
6
+ from structlog import get_logger
7
+
8
+ logger = get_logger(__name__)
9
+
10
+ class QuotaManager:
11
+ """Manage daily quota for listings and searches"""
12
+
13
+ # Quota limits (per day)
14
+ FREE_LISTING_QUOTA = 3
15
+ FREE_SEARCH_QUOTA = 10
16
+ PREMIUM_LISTING_QUOTA = float('inf')
17
+ PREMIUM_SEARCH_QUOTA = float('inf')
18
+
19
+ @staticmethod
20
+ def _get_quota_key(user_id: str, quota_type: str) -> str:
21
+ """Generate Redis key for quota tracking"""
22
+ today = datetime.utcnow().strftime("%Y-%m-%d")
23
+ return f"quota:{user_id}:{quota_type}:{today}"
24
+
25
+ @staticmethod
26
+ def _get_ttl_seconds() -> int:
27
+ """Get seconds until end of day (UTC)"""
28
+ now = datetime.utcnow()
29
+ tomorrow = now + timedelta(days=1)
30
+ end_of_day = tomorrow.replace(hour=0, minute=0, second=0, microsecond=0)
31
+ return int((end_of_day - now).total_seconds())
32
+
33
+ @staticmethod
34
+ async def check_listing_quota(user_id: str, subscription_type: str) -> Tuple[bool, Dict]:
35
+ """
36
+ Check if user can create another listing today
37
+ Returns: (allowed: bool, info: Dict with current_count and limit)
38
+ """
39
+ quota_key = QuotaManager._get_quota_key(user_id, "listings")
40
+
41
+ # Get current count
42
+ current_count = await redis_client.get(quota_key)
43
+ current_count = int(current_count) if current_count else 0
44
+
45
+ # Determine limit based on subscription
46
+ if subscription_type == "premium":
47
+ limit = QuotaManager.PREMIUM_LISTING_QUOTA
48
+ allowed = True
49
+ else:
50
+ limit = QuotaManager.FREE_LISTING_QUOTA
51
+ allowed = current_count < limit
52
+
53
+ info = {
54
+ "current_count": current_count,
55
+ "limit": limit,
56
+ "remaining": limit - current_count if limit != float('inf') else float('inf'),
57
+ "allowed": allowed
58
+ }
59
+
60
+ logger.info("πŸ” Listing quota check", user_id=user_id,
61
+ subscription=subscription_type, current=current_count, limit=limit)
62
+
63
+ return allowed, info
64
+
65
+ @staticmethod
66
+ async def increment_listing_quota(user_id: str) -> None:
67
+ """Increment listing count for today"""
68
+ quota_key = QuotaManager._get_quota_key(user_id, "listings")
69
+ ttl = QuotaManager._get_ttl_seconds()
70
+
71
+ # Increment and set expiry
72
+ await redis_client.incr(quota_key)
73
+ await redis_client.expire(quota_key, ttl)
74
+
75
+ current = await redis_client.get(quota_key)
76
+ logger.info("πŸ“ Listing quota incremented", user_id=user_id, current_count=int(current))
77
+
78
+ @staticmethod
79
+ async def check_search_quota(user_id: str, subscription_type: str) -> Tuple[bool, Dict]:
80
+ """
81
+ Check if user can perform another search today
82
+ Returns: (allowed: bool, info: Dict with current_count and limit)
83
+ """
84
+ quota_key = QuotaManager._get_quota_key(user_id, "searches")
85
+
86
+ # Get current count
87
+ current_count = await redis_client.get(quota_key)
88
+ current_count = int(current_count) if current_count else 0
89
+
90
+ # Determine limit based on subscription
91
+ if subscription_type == "premium":
92
+ limit = QuotaManager.PREMIUM_SEARCH_QUOTA
93
+ allowed = True
94
+ else:
95
+ limit = QuotaManager.FREE_SEARCH_QUOTA
96
+ allowed = current_count < limit
97
+
98
+ info = {
99
+ "current_count": current_count,
100
+ "limit": limit,
101
+ "remaining": limit - current_count if limit != float('inf') else float('inf'),
102
+ "allowed": allowed
103
+ }
104
+
105
+ logger.info("πŸ” Search quota check", user_id=user_id,
106
+ subscription=subscription_type, current=current_count, limit=limit)
107
+
108
+ return allowed, info
109
+
110
+ @staticmethod
111
+ async def increment_search_quota(user_id: str) -> None:
112
+ """Increment search count for today"""
113
+ quota_key = QuotaManager._get_quota_key(user_id, "searches")
114
+ ttl = QuotaManager._get_ttl_seconds()
115
+
116
+ # Increment and set expiry
117
+ await redis_client.incr(quota_key)
118
+ await redis_client.expire(quota_key, ttl)
119
+
120
+ current = await redis_client.get(quota_key)
121
+ logger.info("πŸ”Ž Search quota incremented", user_id=user_id, current_count=int(current))
122
+
123
+ @staticmethod
124
+ async def get_quota_status(user_id: str, subscription_type: str) -> Dict:
125
+ """Get complete quota status for user"""
126
+ listing_allowed, listing_info = await QuotaManager.check_listing_quota(user_id, subscription_type)
127
+ search_allowed, search_info = await QuotaManager.check_search_quota(user_id, subscription_type)
128
+
129
+ return {
130
+ "listings": listing_info,
131
+ "searches": search_info,
132
+ "can_list": listing_allowed,
133
+ "can_search": search_allowed,
134
+ "subscription_type": subscription_type
135
+ }
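
The key layout and expiry logic above reduce to the following standalone sketch (no Redis connection is needed to see the values):

from datetime import datetime, timedelta

def quota_key(user_id: str, quota_type: str) -> str:
    # One counter per user, quota type and UTC day, e.g. "quota:debug_user:listings:2025-01-01"
    return f"quota:{user_id}:{quota_type}:{datetime.utcnow():%Y-%m-%d}"

def ttl_seconds() -> int:
    # Seconds until the next UTC midnight, used as the EXPIRE value on the counter key
    now = datetime.utcnow()
    midnight = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
    return int((midnight - now).total_seconds())

print(quota_key("debug_user", "listings"), ttl_seconds())

QuotaManager then advances the counter with INCR plus EXPIRE and treats a missing key as zero on reads, so the quota resets implicitly at the start of each UTC day.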
app/ai/state.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/ai/state.py – COMPLETE
2
  from typing import TypedDict, List, Optional, Dict, Any
3
  from pydantic import BaseModel, Field
4
  from datetime import datetime
@@ -9,14 +9,16 @@ class PyObjectId(str):
9
  @classmethod
10
  def __get_validators__(cls):
11
  yield cls.validate
 
12
  @classmethod
13
  def validate(cls, v):
14
  if not ObjectId.is_valid(v):
15
  raise ValueError("Invalid ObjectId")
16
  return str(v)
17
 
18
- # ---------- Mongo shape ----------
19
  class ListingDraft(BaseModel):
 
20
  id: Optional[PyObjectId] = Field(default_factory=PyObjectId, alias="_id")
21
  owner_id: str
22
  type: str # rent | short_stay | roommate | sale
@@ -30,71 +32,75 @@ class ListingDraft(BaseModel):
30
  amenities: List[str] = []
31
  requirements: Optional[str] = None
32
  currency: str = "XOF"
33
- images: List[str] = []
34
- status: str = "draft"
35
  created_at: datetime = Field(default_factory=datetime.utcnow)
36
  updated_at: datetime = Field(default_factory=datetime.utcnow)
37
- views: int = 0
38
- favorites: int = 0
39
 
40
  class Config:
41
  populate_by_name = True
42
  json_encoders = {ObjectId: str}
43
 
44
  class VectorMeta(BaseModel):
45
- mongo_id: str
46
- embedding: List[float]
 
47
  location: str
48
  price: float
49
  bedrooms: Optional[int] = None
50
  price_type: str
51
 
52
- # ========== LangGraph state ==========
53
  class ChatState(TypedDict):
54
- # --- core ---
55
  user_id: str
56
  user_role: str # landlord | renter
57
- messages: List[dict]
58
  draft: Optional[ListingDraft]
59
- vector_meta: Optional[VectorMeta]
60
  allowed: bool
61
  ai_reply: str
62
- status: Optional[str]
63
-
64
- # --- search filters ---
 
 
65
  location: Optional[str]
66
  min_price: Optional[float]
67
  max_price: Optional[float]
68
  bedrooms: Optional[int]
69
  bathrooms: Optional[int]
70
  amenities: List[str]
71
-
72
- # --- listing creation ---
73
- listing_type: Optional[str]
74
  price: Optional[float]
75
- price_type: Optional[str]
76
  currency: str
77
  requirements: Optional[str]
78
-
79
- # --- flow helpers ---
80
  missing_fields: List[str]
81
  next_question: Optional[str]
 
 
82
  search_query: Optional[str]
83
  search_results: Optional[List[dict]]
84
  suggestions: Optional[List[dict]]
 
 
85
  image: Optional[Dict[str, str]] # {mime, data}
86
-
87
- # --- ML fields ---
88
- field_validations: Optional[Dict[str, Dict]]
89
- field_confidences: Optional[Dict[str, float]]
90
- location_details: Optional[Dict[str, Any]]
91
- validation_suggestions: Optional[List[str]]
92
- listing_confidence: Optional[float]
93
- currency_confidence: Optional[float]
94
-
95
- # --- draft & publish ---
96
  draft_preview: Optional[Dict]
97
- mongo_id: Optional[str]
98
-
99
- # --- NEW: editing ---
100
- editing_field: Optional[str]
 
1
+ # app/ai/state.py - FIXED WITH ML FIELDS
2
  from typing import TypedDict, List, Optional, Dict, Any
3
  from pydantic import BaseModel, Field
4
  from datetime import datetime
 
9
  @classmethod
10
  def __get_validators__(cls):
11
  yield cls.validate
12
+
13
  @classmethod
14
  def validate(cls, v):
15
  if not ObjectId.is_valid(v):
16
  raise ValueError("Invalid ObjectId")
17
  return str(v)
18
 
19
+ # ---------- models ----------
20
  class ListingDraft(BaseModel):
21
+ """Pure MongoDB shape (source of truth)"""
22
  id: Optional[PyObjectId] = Field(default_factory=PyObjectId, alias="_id")
23
  owner_id: str
24
  type: str # rent | short_stay | roommate | sale
 
32
  amenities: List[str] = []
33
  requirements: Optional[str] = None
34
  currency: str = "XOF"
35
+ images: List[str] = [] # βœ… NEW: Cloudflare image URLs
36
+ status: str = "draft" # draft | published
37
  created_at: datetime = Field(default_factory=datetime.utcnow)
38
  updated_at: datetime = Field(default_factory=datetime.utcnow)
39
+ views: int = 0 # βœ… NEW: View counter
40
+ favorites: int = 0 # βœ… NEW: Favorites counter
41
 
42
  class Config:
43
  populate_by_name = True
44
  json_encoders = {ObjectId: str}
45
 
46
  class VectorMeta(BaseModel):
47
+ """What we push to Qdrant (search-only)"""
48
+ mongo_id: str # same _id as Mongo
49
+ embedding: List[float] # 1536-dim vector
50
  location: str
51
  price: float
52
  bedrooms: Optional[int] = None
53
  price_type: str
54
 
55
+ # ========== LangGraph state with ML fields ==========
56
  class ChatState(TypedDict):
57
+ # ========== Core Fields ==========
58
  user_id: str
59
  user_role: str # landlord | renter
60
+ messages: List[dict] # chat history
61
  draft: Optional[ListingDraft]
62
+ vector_meta: Optional[VectorMeta] # for hybrid search
63
  allowed: bool
64
  ai_reply: str
65
+ # βœ… Note: "intent" is passed through messages but NOT stored as state field
66
+ # This avoids LangGraph node name conflicts
67
+ status: Optional[str] # collecting | draft_ready | preview_shown | published | error
68
+
69
+ # ========== Search filters (from LLM) ==========
70
  location: Optional[str]
71
  min_price: Optional[float]
72
  max_price: Optional[float]
73
  bedrooms: Optional[int]
74
  bathrooms: Optional[int]
75
  amenities: List[str]
76
+
77
+ # ========== Listing creation fields (from LLM) ==========
78
+ listing_type: Optional[str] # rent | short_stay | sale | roommate
79
  price: Optional[float]
80
+ price_type: Optional[str] # monthly | nightly | yearly | daily | weekly
81
  currency: str
82
  requirements: Optional[str]
83
+
84
+ # ========== Collection Flow ==========
85
  missing_fields: List[str]
86
  next_question: Optional[str]
87
+
88
+ # ========== Search results ==========
89
  search_query: Optional[str]
90
  search_results: Optional[List[dict]]
91
  suggestions: Optional[List[dict]]
92
+
93
+ # ========== Image upload ==========
94
  image: Optional[Dict[str, str]] # {mime, data}
95
+
96
+ # ========== ML Fields (NEW - for validation & inference) ==========
97
+ field_validations: Optional[Dict[str, Dict]] # {field: {is_valid, confidence, suggestion}}
98
+ field_confidences: Optional[Dict[str, float]] # {field: confidence_score}
99
+ location_details: Optional[Dict[str, Any]] # {city, country, lat, lon, country_code}
100
+ validation_suggestions: Optional[List[str]] # ["βœ… field: suggestion"]
101
+ listing_confidence: Optional[float] # Confidence for inferred listing_type
102
+ currency_confidence: Optional[float] # Confidence for inferred currency
103
+
104
+ # ========== Draft & Publishing ==========
105
  draft_preview: Optional[Dict]
106
+ mongo_id: Optional[str]
 
 
 
app/ml/models/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (187 Bytes)

app/ml/models/__pycache__/ml_listing_extractor.cpython-313.pyc ADDED
Binary file (28.9 kB)
 
debug_flow.py ADDED
@@ -0,0 +1,27 @@
+ # debug_flow.py
+ import asyncio
+ from app.ai.service import aida_chat_sync
+
+ async def test_flow():
+     user_id = "debug_user"
+     user_role = "landlord"
+     messages = [
+         "I want to list my 2-bedroom apartment in Lagos for 50k monthly",
+         "2 bathrooms",
+         "wifi and parking",
+         "no requirements",
+         "publish"
+     ]
+
+     state = None
+     for msg in messages:
+         print(f"\n[USER]: {msg}")
+         state = await aida_chat_sync(user_id, user_role, msg)
+         print(f"[AIDA]: {state.get('ai_reply')}")
+         print(f"[STATUS]: {state.get('status')}")
+         print(f"[MISSING]: {state.get('missing_fields')}")
+         print(f"[DRAFT]: {state.get('draft_preview') is not None}")
+         print(f"[MONGO_ID]: {state.get('mongo_id')}")
+
+ if __name__ == "__main__":
+     asyncio.run(test_flow())