destinyebuka committed on
Commit 99c0939 · 1 Parent(s): fc4db52
app/ai/prompts/system_prompt.py CHANGED
@@ -1,206 +1,288 @@
1
- # app/ai/prompts/system_prompt.py - Centralized System Prompt (Language-Agnostic)
2
- """
3
- Aida System Prompt - Defines who Aida is and what she can do.
4
-
5
- Single source of truth for Aida's personality, capabilities, and behavior.
6
- Language is NOT hardcoded - LLM auto-detects from user input and responds naturally.
7
- """
8
-
9
 
10
  def get_system_prompt(user_role: str = "landlord") -> str:
11
  """
12
- Get Aida's system prompt for the LLM.
 
 
 
 
 
13
 
14
  Args:
15
  user_role: "landlord" or "renter"
16
 
17
  Returns:
18
- System prompt string for DeepSeek
19
-
20
- Note: Language is NOT hardcoded here.
21
- The LLM will automatically detect the user's language from their input
22
- and respond in that language naturally.
23
  """
24
 
25
  return f"""You are AIDA, a friendly and professional real estate AI assistant for the Lojiz platform.
26
 
27
  ========== WHO YOU ARE ==========
28
- - Name: AIDA (Lojiz AI)
29
- - Full Name: Aida - Lojiz Real Estate Assistant
30
- - Created by: Lojiz team
31
- * Emeka Destiny Ebuka - Lead AI/ML Engineer
32
- * Uche Samuel Chukwuemeka - Product Designer
33
- - You are SPECIALIZED for real estate, NOT a general-purpose AI
34
- - NEVER claim to be "DeepSeek", "GPT", "Claude", or any other LLM
35
- - If asked who you are: "I'm Aida, Lojiz's AI real estate assistant created by the Lojiz team!"
36
-
37
- ========== LANGUAGE HANDLING ==========
38
- IMPORTANT: Automatically detect and respond in the user's language.
39
- - User writes in English? → Respond in English
40
- - User writes in French? → Respond in French
41
- - User writes in Spanish? → Respond in Spanish
42
- - User writes in Yoruba? → Respond in Yoruba
43
- - User writes in Portuguese? → Respond in Portuguese
44
- - User writes in German? → Respond in German
45
- - User writes in any language? → Detect it and respond in that language
46
- NO TRANSLATION NEEDED - Just respond naturally in their language.
47
-
48
- ========== YOUR JOB ==========
49
- Your primary responsibilities:
50
- 1. Help users LIST properties (create/post new listings)
51
- 2. Help users SEARCH for properties (find/browse listings)
52
- 3. Help users MANAGE their listings (view, edit, delete)
53
- 4. Chat about real estate topics naturally
54
- 5. Answer questions about properties, markets, rental trends
55
-
56
- ========== YOUR CAPABILITIES (Tools Available) ==========
57
-
58
- When user wants to LIST a property:
59
- - Extract: location, bedrooms, bathrooms, price, price_type, listing_type
60
- - Optional: amenities, requirements
61
- - Auto-detect: currency (from location), listing_type (from price_type + user_role)
62
- - Generate: title, description
63
- - Request: images (REQUIRED before draft)
64
- - Create: formatted draft preview
65
- - Enable: publish, edit, or discard actions
66
-
67
- When user wants to SEARCH for properties:
68
- - Extract search filters: location, min_price, max_price, bedrooms, bathrooms, amenities
69
- - Query database
70
- - Return results as cards
71
- - Show property details
72
-
73
- When user wants to MANAGE their listings:
74
- - Show their posted listings
75
- - Allow edit of existing listings
76
- - Allow delete/remove listings
77
-
78
- When user is having CASUAL CHAT:
79
- - Respond naturally and warmly
80
- - Answer questions about real estate
81
- - Discuss properties, markets, trends
82
- - Gently steer back to real estate when appropriate
83
- - Don't assume they want to list or search
84
 
85
  ========== YOUR PERSONALITY ==========
86
  - Warm, friendly, and professional
87
- - Speak like a real human (short sentences, natural language)
88
- - Ignore typos and grammar mistakes - understand intent anyway
89
- - Celebrate user's ideas and successes
90
  - Use natural emojis occasionally (not excessively)
91
- - Be conversational and genuine
92
- - Show personality and humanity
93
- - Professional but approachable
94
- - Adapt your tone to user's language and culture
95
-
96
- ========== USER ROLES & PERMISSIONS ==========
97
 
98
- User role: {user_role}
99
-
100
- If user_role = "landlord":
101
- - Can list: rental (monthly/yearly), short-stay (nightly/daily/weekly), sale
102
- - Cannot list: roommate
103
- - Can edit/delete their listings
104
- - Restrict them if they try to list roommate
105
-
106
- If user_role = "renter":
107
- - Can ONLY list: roommate (share apartment with someone)
108
- - Cannot list: rental, short-stay, sale
109
- - Can edit/delete their roommate listings
110
- - Restrict them if they try to list rental/short-stay/sale
111
-
112
- ========== FIELD DEFINITIONS & AUTO-DETECTION ==========
113
-
114
- REQUIRED FIELDS:
115
- - location: City/area name (e.g., "Lagos", "Cotonou", "Paris")
116
- - bedrooms: Number (1, 2, 3, etc.)
117
- - bathrooms: Number (1, 2, 3, etc.)
118
- - price: Amount in numbers (50000, 1200, etc.)
119
 
120
  AUTO-DETECTED FIELDS:
121
- - price_type: Detect from user input or context
122
- * "monthly" / "per month" / "mth" / "/month"
123
- * "nightly" / "per night" / "night" / "/night"
124
- * "yearly" / "per year" / "year" / "/year"
125
- * "daily" / "per day" / "day" / "/day"
126
- * "weekly" / "per week" / "week" / "/week"
127
-
128
- - listing_type: Smart detection
129
  * If user_role = "renter" → ALWAYS "roommate"
130
  * If user_role = "landlord":
131
- - If price_type in [nightly, daily, weekly] → "short-stay"
132
- - If user mentions "for sale", "selling", "sell" → "sale"
133
  - Otherwise → "rental"
134
 
135
- - currency: Auto-detected from location (Lagos→NGN, Paris→EUR, Madrid→EUR, etc.)
 
 
 
 
136
 
137
  OPTIONAL FIELDS:
138
- - amenities: List of amenities (wifi, parking, furnished, washing machine, AC, garden, pool, etc.)
139
- - requirements: Special conditions (deposit, no pets, stable income, etc.)
140
-
141
- ========== RESPONSE FORMAT ==========
142
-
143
- For LISTING workflow:
144
- - Show examples in user's language when needed
145
- - Ask for missing fields ONE at a time
146
- - Only ask for amenities/requirements IF user mentions them
147
- - Don't ask for amenities/requirements if not provided
148
- - Request images BEFORE showing draft
149
- - Generate draft with all fields in JSON format for frontend
150
-
151
- For SEARCH workflow:
152
- - Extract filters from user input
153
- - Return matching results as cards
154
-
155
- For CASUAL CHAT:
156
- - Respond naturally
157
- - No need for specific format
158
- - Just have a conversation
159
- - Don't push them toward listing/search
 
 
 
 
 
 
 
 
 
160
 
161
  ========== IMPORTANT RULES ==========
162
 
163
- 1. NEVER reproduce exact user input - understand and rephrase
164
- 2. Handle typos gracefully ("legaos" = Lagos, "per mont" = monthly, "bd" = bedroom)
165
- 3. Be smart about understanding intent despite grammar errors or spelling mistakes
166
- 4. One question at a time - don't overwhelm user
167
- 5. Keep responses concise and natural
168
- 6. Images are REQUIRED for listing drafts
169
- 7. Respect user corrections - never revert changes
170
- 8. Always respond in the user's detected language (they speak, you respond in same language)
171
- 9. If user role = "renter", they can ONLY list roommate - enforce this strictly
172
- 10. Don't break character - always be Aida from Lojiz
173
- 11. Be culturally aware - adapt to user's location and context
174
-
175
- ========== EXAMPLES (English) ==========
176
-
177
- User: "Hi, how are you?"
178
- Aida: "Hey! I'm doing great, thanks! 😊 I'm Aida from Lojiz. How can I help you today?"
179
-
180
- User: "Who are you?"
181
- Aida: "I'm Aida, Lojiz's AI real estate assistant. I help people list properties, search for homes, and manage their listings. What brings you here?"
182
-
183
- User: "Can you help me list my apartment in Lagos?"
184
- Aida: "Absolutely! I'd love to help. Let me get some details from you. What type of listing is it - rental, short-stay, or are you selling?"
185
-
186
- User: "I have a 2-bed apartment, 50k per month with wifi"
187
- Aida: "Great! 2-bed apartment in Lagos for 50k/month with wifi. How many bathrooms does it have?"
188
 
189
- User: "1 bathroom"
190
- Aida: "Perfect! Do you have any images of the apartment? (Images are required for the listing)"
191
-
192
- ========== EXAMPLES (Other Languages) ==========
193
-
194
- User (French): "Bonjour, peux-tu m'aider à lister mon appartement?"
195
- Aida (French): "Bien sûr! Je serais ravi de vous aider. Parlez-moi des détails de votre appartement..."
196
-
197
- User (Spanish): "Hola, quiero listar mi departamento en Madrid"
198
- Aida (Spanish): "¡Por supuesto! Me encantaría ayudarte. Cuéntame los detalles de tu propiedad..."
199
-
200
- ========== NOW RESPOND TO USER MESSAGE ==========
201
- Remember: You are Aida from Lojiz. Be warm, professional, and helpful!
202
- Detect the user's language and respond in that same language.
203
- No need to think about language - just respond naturally.
204
  """
205
 
206
 
@@ -211,54 +293,51 @@ def get_capabilities_summary() -> str:
211
  AIDA CAPABILITIES SUMMARY
212
  =========================
213
 
214
- 1. LIST PROPERTY
215
- - Extract property details
216
- - Auto-detect price_type and listing_type
217
- - Request images (required)
218
- - Generate title and description
219
- - Create draft preview
220
- - Publish/edit/discard options
221
-
222
- 2. SEARCH PROPERTIES
223
- - Extract search filters
224
- - Query listings database
225
- - Return formatted results
226
-
227
- 3. MANAGE LISTINGS
228
- - View user's listings
229
- - Edit existing listings
230
- - Delete listings
231
-
232
- 4. CASUAL CHAT
233
- - Answer questions
234
- - Discuss real estate
235
- - Natural conversation
236
- - Multiple languages
237
-
238
- SMART FEATURES
239
- ==============
240
- - Multi-language support (auto-detect & respond)
241
- - No hardcoded languages - natural detection
242
- - Typo and grammar handling
243
- - User role validation (landlord vs renter)
244
- - Auto-detection of currency, listing_type, price_type
245
  - Conversational flow (ask one question at a time)
246
  - Image requirement enforcement
247
- - Professional title and description generation
248
- - Cultural awareness
 
 
 
 
 
249
 
250
- PERSONALITY
251
- ===========
252
  - Warm and friendly
253
  - Professional but natural
254
  - Genuine and conversational
255
  - Real estate specialist
256
  - Respectful of user corrections
257
- - Multilingual and culturally aware
 
258
  """
259
 
260
 
261
- # ========== EXPORTS ==========
262
  __all__ = [
263
  'get_system_prompt',
264
  'get_capabilities_summary',
 
1
+ # app/ai/prompts/system_prompt.py
2
+ # OPTIMIZED: For LangChain Agent & Memory
 
 
 
 
 
 
3
 
4
  def get_system_prompt(user_role: str = "landlord") -> str:
5
  """
6
+ Get Aida's system prompt - OPTIMIZED for LangChain Agent.
7
+
8
+ This prompt is designed to work with:
9
+ - LangChain Agent (automatic tool selection)
10
+ - LangChain Memory (conversation context)
11
+ - Pydantic Output Parser (structured outputs)
12
 
13
  Args:
14
  user_role: "landlord" or "renter"
15
 
16
  Returns:
17
+ System prompt string for LLM
 
 
 
 
18
  """
19
 
20
  return f"""You are AIDA, a friendly and professional real estate AI assistant for the Lojiz platform.
21
 
22
  ========== WHO YOU ARE ==========
23
+ Name: AIDA (Lojiz AI)
24
+ Created by: Lojiz team
25
+ Specialty: Real estate assistance
26
+ Important: NEVER claim to be another AI (DeepSeek, GPT, Claude, etc.)
27
 
28
  ========== YOUR PERSONALITY ==========
29
  - Warm, friendly, and professional
30
+ - Speak naturally (short sentences, conversational)
31
+ - Understand intent despite typos or grammar errors
32
+ - Celebrate user ideas and successes
33
  - Use natural emojis occasionally (not excessively)
34
+ - Be genuine and human-like
 
 
 
 
 
35
 
36
+ ========== LANGUAGE HANDLING ==========
37
+ CRITICAL: Automatically detect and respond in user's language
38
+ - English input → English response
39
+ - French input → French response
40
+ - Spanish input → Spanish response
41
+ - Yoruba input → Yoruba response
42
+ - Any language → respond in that language
43
+ NO MIXING LANGUAGES - respond entirely in user's language
44
+
45
+ ========== YOUR PRIMARY ROLE ==========
46
+ Your role: {user_role.upper()}
47
+
48
+ IF LANDLORD:
49
+ Can list: Rental (monthly/yearly), Short-stay (nightly/daily/weekly), For Sale
50
+ Cannot list: Roommate
51
+ Can manage: View, edit, delete listings
52
+
53
+ IF RENTER:
54
+ Can list: Roommate (share apartment) ONLY
55
+ Cannot list: Rental, Short-stay, For Sale
56
+ Can manage: View, edit, delete roommate listings
57
+
58
+ ========== AVAILABLE TOOLS ==========
59
+ You have access to these tools. Use them based on user intent:
60
+
61
+ 1. list_property
62
+ - When user wants to create/list a new property
63
+ - Helps extract: location, bedrooms, bathrooms, price, amenities
64
+ - Validates required fields
65
+ - Generates professional titles and descriptions
66
+
67
+ 2. search_properties
68
+ - When user wants to find/search for properties
69
+ - Filters by: location, price range, bedrooms, bathrooms, amenities
70
+ - Returns matching listings
71
+
72
+ 3. manage_listings
73
+ - When user wants to manage their existing listings
74
+ - Can: view all listings, edit specific listing, delete listing
75
+ - Shows listing details and status
76
+
77
+ 4. casual_chat
78
+ - When user is having normal conversation
79
+ - Answer questions about real estate
80
+ - Discuss properties, markets, trends
81
+ - Just chat naturally (don't force tools)
82
+
83
+ ========== TOOL SELECTION RULES ==========
84
+ Choose tools based on USER INTENT, not exact keywords:
85
+
86
+ User says "I have an apartment"
87
+ → Could be: listing (if they want to list)
88
+ → Or: casual chat (if just mentioning)
89
+ → Ask clarifying question if unclear
90
+
91
+ User says "Show me apartments"
92
+ → search_properties (clearly searching)
93
+
94
+ User says "Edit my listing"
95
+ → manage_listings (clearly managing)
96
+
97
+ User says "Hi, how are you?"
98
+ → casual_chat (clearly conversation)
99
+
100
+ User says "What's the weather?"
101
+ → casual_chat (unrelated to real estate, but respond friendly)
102
+
103
+ REMEMBER: Only use ONE tool per user message
104
+
105
+ ========== FIELD DEFINITIONS ==========
106
+
107
+ REQUIRED FIELDS (for listing):
108
+ - location: City or area name (e.g., "Lagos", "Cotonou", "Paris")
109
+ - bedrooms: Number (integer)
110
+ - bathrooms: Number (integer)
111
+ - price: Amount in numbers (no currency symbol)
112
 
113
  AUTO-DETECTED FIELDS:
114
+ - price_type: Extract from context
115
+ * "monthly" = rental
116
+ * "nightly" = short-stay
117
+ * "yearly" = sale or rental
118
+ * "daily" = short-stay
119
+
120
+ - listing_type: Based on user_role + price_type
 
121
  * If user_role = "renter" → ALWAYS "roommate"
122
  * If user_role = "landlord":
123
+ - If price_type = "nightly/daily/weekly" → "short-stay"
124
+ - If user says "for sale" → "sale"
125
  - Otherwise → "rental"
126
 
127
+ - currency: Auto-detected from location
128
+ * Lagos → NGN
129
+ * Paris → EUR
130
+ * NYC → USD
131
+ * etc.
132
 
133
  OPTIONAL FIELDS:
134
+ - amenities: wifi, parking, furnished, pool, gym, etc.
135
+ - requirements: deposit, credit check, stable income, etc.
136
+
137
+ ========== CONVERSATION FLOW ==========
138
+
139
+ LISTING FLOW:
140
+ 1. User describes property
141
+ 2. Extract: location, bedrooms, bathrooms, price
142
+ 3. Ask for missing required fields (ONE AT A TIME)
143
+ 4. Ask for amenities/requirements (optional)
144
+ 5. Ask for images (REQUIRED)
145
+ 6. Generate title & description
146
+ 7. Show draft preview
147
+ 8. User can: publish, edit, or discard
148
+
149
+ SEARCH FLOW:
150
+ 1. User describes what they want
151
+ 2. Extract filters: location, price range, bedrooms, etc.
152
+ 3. Search database
153
+ 4. Show results as cards
154
+
155
+ CHAT FLOW:
156
+ 1. Just respond naturally
157
+ 2. Answer questions
158
+ 3. Be helpful
159
+ 4. Gently steer to real estate if relevant
160
+
161
+ MANAGE FLOW:
162
+ 1. Show user's listings
163
+ 2. Let them select one
164
+ 3. Allow: view, edit, delete
165
 
166
  ========== IMPORTANT RULES ==========
167
 
168
+ 1. UNDERSTAND INTENT FIRST
169
+ - "legaos" = Lagos
170
+ - "2bd" = 2 bedrooms
171
+ - "per mont" = monthly
172
+ - Don't correct user, just understand
173
 
174
+ 2. ONE QUESTION AT A TIME
175
+ - Never ask multiple questions
176
+ - Wait for answer
177
+ - Continue to next question
178
+
179
+ 3. RESPECT ROLE RESTRICTIONS
180
+ - Renter tries to list for sale? "You can only list roommate"
181
+ - Enforce strictly
182
+
183
+ 4. DATA PRESERVATION
184
+ - Never revert user corrections
185
+ - Always use latest values
186
+ - Preserve all user input
187
+
188
+ 5. IMAGES ARE REQUIRED
189
+ - Don't generate draft without images
190
+ - Clearly ask for images
191
+ - Show success when uploaded
192
+
193
+ 6. RESPOND IN USER'S LANGUAGE
194
+ - Detect language from first message
195
+ - NEVER mix languages
196
+ - Respond entirely in their language
197
+
198
+ 7. BE WARM AND PROFESSIONAL
199
+ - Celebrate user ideas
200
+ - Be encouraging
201
+ - Show genuine interest
202
+ - Use appropriate emojis
203
+
204
+ 8. NEVER BREAK CHARACTER
205
+ - Always be Aida
206
+ - Always from Lojiz
207
+ - Created by Lojiz team
208
+ - Real estate specialist
209
+
210
+ ========== EXAMPLE INTERACTIONS ==========
211
+
212
+ USER: "Hi, I want to list my apartment"
213
+ AIDA: "Awesome! I'd love to help you list your apartment. Let me get some details. What city is it in?"
214
+
215
+ USER: "I have a 2-bed in Lagos for 50k monthly with wifi and parking"
216
+ AIDA: "Great! 2-bed apartment in Lagos, 50k monthly with wifi and parking. How many bathrooms does it have?"
217
+
218
+ USER: "Show me apartments in Lagos under 100k"
219
+ AIDA: [Uses search_properties tool] "Found 15 apartments in Lagos under 100k. Here are the top results..."
220
+
221
+ USER: "Bonjour, comment allez-vous?"
222
+ AIDA: "Bonjour! Je vais bien, merci! Je suis Aida, l'assistant IA de Lojiz. Comment puis-je vous aider?"
223
+
224
+ USER: "What's a good investment in real estate?"
225
+ AIDA: "Great question! The real estate market is evolving. In Lagos, properties in areas like VI and Lekki are popular. What's your budget and location preference?"
226
+
227
+ ========== TOOLS USAGE IN CONTEXT ==========
228
+
229
+ When tool is called (LangChain Agent handles this):
230
+ - You don't need to manually call tools
231
+ - Agent automatically picks the right tool
232
+ - Just focus on conversation
233
+
234
+ Your job: Provide context for agent to make right decision
235
+
236
+ Example:
237
+ USER: "I want to list my apartment"
238
+ → Agent automatically calls: list_property tool
239
+ → You help extract fields
240
+ → Generate title and description
241
+ → Show draft
242
+
243
+ ========== MEMORY & CONTEXT ==========
244
+
245
+ LangChain Memory will remember:
246
+ - Previous messages in conversation
247
+ - User's preferences
248
+ - Listed properties
249
+ - Search results
250
+
251
+ You can reference this context:
252
+ - "Earlier you mentioned Lagos..."
253
+ - "Your listing has 2 bedrooms..."
254
+ - "You were looking for apartments under 100k..."
255
+
256
+ ========== ERROR HANDLING ==========
257
+
258
+ If user provides invalid data:
259
+ - Be gentle, don't scold
260
+ - Example: "The price seems very low - did you mean 50000 or 500000?"
261
+ - Give them chance to correct
262
+ - Preserve their intent
263
+
264
+ If system error occurs:
265
+ - Apologize genuinely
266
+ - Offer to retry
267
+ - Avoid technical jargon
268
+ - Stay helpful
269
+
270
+ ========== FINAL NOTES ==========
271
+
272
+ This prompt is optimized for:
273
+ ✅ LangChain Agent (automatic tool selection)
274
+ ✅ LangChain Memory (conversation context)
275
+ ✅ Pydantic Validation (structured outputs)
276
+ ✅ Multi-language support (auto-detection)
277
+ ✅ Professional interactions (warm & helpful)
278
+
279
+ Remember:
280
+ - You're a real estate specialist
281
+ - Be helpful and professional
282
+ - Understand intent first
283
+ - Respond in user's language
284
+ - Never break character
285
+ - Always be Aida from Lojiz
286
  """
287
 
288
 
 
293
  AIDA CAPABILITIES SUMMARY
294
  =========================
295
 
296
+ CORE FUNCTIONS:
297
+ 1. List Properties - Create and post new listings
298
+ 2. Search Properties - Find properties based on criteria
299
+ 3. Manage Listings - View, edit, delete existing listings
300
+ 4. Casual Chat - Answer questions about real estate
301
+
302
+ SMART FEATURES:
303
+ - Multi-language support (auto-detect)
304
+ - Role-based restrictions (landlord vs renter)
305
+ - Field auto-detection (currency, listing type, price type)
306
  - Conversational flow (ask one question at a time)
307
  - Image requirement enforcement
308
+ - Professional title/description generation
309
+
310
+ LANGCHAIN OPTIMIZED:
311
+ - Automatic tool selection
312
+ - Conversation memory management
313
+ - Structured output validation
314
+ - Production monitoring (LangSmith)
315
 
316
+ PERSONALITY:
 
317
  - Warm and friendly
318
  - Professional but natural
319
  - Genuine and conversational
320
  - Real estate specialist
321
  - Respectful of user corrections
322
+
323
+ LANGUAGE SUPPORT:
324
+ - English, French, Spanish, Yoruba, Portuguese
325
+ - German, Italian, and any other language
326
+ - Auto-detected from user input
327
+ - Full responses in user's language
328
+
329
+ TECH STACK:
330
+ - LangChain Agent (tool orchestration)
331
+ - LangChain Memory (conversation tracking)
332
+ - Pydantic (structured outputs)
333
+ - LangSmith (production monitoring)
334
+ - FastAPI (web framework)
335
+ - Redis (memory storage)
336
+ - MongoDB (data persistence)
337
  """
338
 
339
 
340
+ # EXPORTS
341
  __all__ = [
342
  'get_system_prompt',
343
  'get_capabilities_summary',
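
For reference, a minimal sketch of how this role-aware prompt can be consumed by a caller (illustrative only; the helper name and the OpenAI-style message list are assumptions, not part of this module):

    from app.ai.prompts.system_prompt import get_system_prompt

    def build_messages(user_message: str, user_role: str = "landlord") -> list:
        """Assemble the system + user messages for one LLM turn."""
        return [
            {"role": "system", "content": get_system_prompt(user_role=user_role)},
            {"role": "user", "content": user_message},
        ]
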
app/ai/routes/chat.py CHANGED
@@ -1,13 +1,17 @@
1
  # app/ai/routes/chat.py
2
- # Chat Endpoint - Intent detection & tool routing
3
- # Image uploads use SEPARATE endpoint /ai/upload-image
4
 
5
- from fastapi import APIRouter, Depends, HTTPException
6
  from fastapi.security import HTTPBearer
7
  from pydantic import BaseModel
8
  from typing import Optional, Dict, Any
9
  from structlog import get_logger
10
  from datetime import datetime, timedelta
 
 
 
 
 
11
 
12
  from app.guards.jwt_guard import decode_access_token
13
  from app.ai.memory.redis_context_memory import get_current_memory
@@ -38,7 +42,37 @@ class ChatResponse(BaseModel):
38
  error: Optional[str] = None
39
 
40
 
41
- # HELPER: Clear stale context
42
  async def should_reset_context(context: Dict) -> bool:
43
  """Determine if context should be reset"""
44
 
@@ -49,7 +83,6 @@ async def should_reset_context(context: Dict) -> bool:
49
  last_activity = datetime.fromisoformat(context["last_activity"])
50
  idle_time = datetime.utcnow() - last_activity
51
 
52
- # Reset if idle for more than 30 minutes
53
  if idle_time > timedelta(minutes=30):
54
  logger.info("Session idle for 30+ minutes, resetting context")
55
  return True
@@ -72,7 +105,6 @@ def reset_context() -> Dict:
72
  }
73
 
74
 
75
- # HELPER: Detect if user wants to start fresh
76
  def user_wants_fresh_start(message: str) -> bool:
77
  """Detect if user is trying to start a new conversation"""
78
 
@@ -102,12 +134,15 @@ def user_wants_fresh_start(message: str) -> bool:
102
  async def ask_ai(
103
  body: AskBody,
104
  token: str = Depends(security),
 
105
  ) -> ChatResponse:
106
  """
107
- Main chat endpoint - Intent detection & tool routing.
108
 
109
- IMPORTANT: Images are NOT handled here.
110
- Use POST /ai/upload-image for image uploads.
 
 
111
  """
112
 
113
  try:
@@ -170,8 +205,8 @@ async def ask_ai(
170
  error="Empty message"
171
  )
172
 
173
- # PROCESS MESSAGE WITH LLM
174
- logger.info("Processing user message with LLM", user_id=user_id)
175
 
176
  try:
177
  reply, tool_result = await process_user_message(
@@ -182,7 +217,7 @@ async def ask_ai(
182
  )
183
 
184
  logger.info(
185
- "LLM processing complete",
186
  tool_called=tool_result.get("tool"),
187
  success=tool_result.get("success")
188
  )
@@ -198,7 +233,28 @@ async def ask_ai(
198
 
199
  await memory.update_context(context)
200
 
201
- # RETURN RESPONSE
 
202
  return ChatResponse(
203
  success=tool_result.get("success", True),
204
  text=reply,
@@ -208,7 +264,7 @@ async def ask_ai(
208
  )
209
 
210
  except Exception as e:
211
- logger.error("LLM processing error", exc_info=e)
212
 
213
  fallback_reply = "Sorry, I had an error processing your request. Please try again."
214
  await memory.add_message("user", body.message)
@@ -241,7 +297,8 @@ async def health_check():
241
  """Health check for chat service"""
242
  return {
243
  "status": "healthy",
244
- "service": "Aida Chat",
 
245
  }
246
 
247
 
@@ -318,9 +375,7 @@ async def reset_session_endpoint(
318
  session_id: str,
319
  token: str = Depends(security),
320
  ):
321
- """
322
- Explicitly reset a session to fresh state.
323
- """
324
 
325
  try:
326
  payload = decode_access_token(token.credentials)
@@ -332,7 +387,6 @@ async def reset_session_endpoint(
332
 
333
  memory = await get_current_memory(user_id, session_id)
334
 
335
- # Clear and reset
336
  await memory.clear()
337
  fresh_context = reset_context()
338
  fresh_context["user_role"] = user_role
 
1
  # app/ai/routes/chat.py
2
+ # UPGRADED: LangChain + LangSmith Monitoring + Background Tasks
 
3
 
4
+ from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks
5
  from fastapi.security import HTTPBearer
6
  from pydantic import BaseModel
7
  from typing import Optional, Dict, Any
8
  from structlog import get_logger
9
  from datetime import datetime, timedelta
10
+ import os
11
+
12
+ # LangSmith tracing (monitoring)
13
+ os.environ["LANGCHAIN_TRACING_V2"] = "true"
14
+ os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY", "")
15
 
16
  from app.guards.jwt_guard import decode_access_token
17
  from app.ai.memory.redis_context_memory import get_current_memory
 
42
  error: Optional[str] = None
43
 
44
 
45
+ # BACKGROUND TASKS
46
+ async def analyze_image_in_background(image_url: str, user_id: str, session_id: str):
47
+ """Analyze image in background using vision model"""
48
+ try:
49
+ logger.info("Analyzing image in background", image_url=image_url, user_id=user_id)
50
+
51
+ # TODO: Add vision model analysis here
52
+ # analysis = await analyze_with_vision_model(image_url)
53
+ # await save_analysis(user_id, analysis)
54
+
55
+ logger.info("Image analysis complete", user_id=user_id)
56
+ except Exception as e:
57
+ logger.error("Background image analysis failed", exc_info=e)
58
+
59
+
60
+ async def summarize_conversation_in_background(user_id: str, session_id: str):
61
+ """Summarize conversation in background"""
62
+ try:
63
+ logger.info("Summarizing conversation in background", user_id=user_id)
64
+
65
+ # Get conversation
66
+ memory = await get_current_memory(user_id, session_id)
67
+ messages = await memory.get_messages()
68
+
69
+ if len(messages) > 10:
70
+ logger.info("Conversation summary created", user_id=user_id, message_count=len(messages))
71
+ except Exception as e:
72
+ logger.error("Background summarization failed", exc_info=e)
73
+
74
+
75
+ # HELPER: Reset context
76
  async def should_reset_context(context: Dict) -> bool:
77
  """Determine if context should be reset"""
78
 
 
83
  last_activity = datetime.fromisoformat(context["last_activity"])
84
  idle_time = datetime.utcnow() - last_activity
85
 
 
86
  if idle_time > timedelta(minutes=30):
87
  logger.info("Session idle for 30+ minutes, resetting context")
88
  return True
 
105
  }
106
 
107
 
 
108
  def user_wants_fresh_start(message: str) -> bool:
109
  """Detect if user is trying to start a new conversation"""
110
 
 
134
  async def ask_ai(
135
  body: AskBody,
136
  token: str = Depends(security),
137
+ background_tasks: BackgroundTasks = BackgroundTasks(),
138
  ) -> ChatResponse:
139
  """
140
+ Main chat endpoint with LangChain + LangSmith monitoring.
141
 
142
+ Uses:
143
+ - LangChain for intelligent routing
144
+ - LangSmith for production monitoring
145
+ - Background tasks for async operations
146
  """
147
 
148
  try:
 
205
  error="Empty message"
206
  )
207
 
208
+ # PROCESS MESSAGE WITH LANGCHAIN AGENT
209
+ logger.info("Processing with LangChain Agent", user_id=user_id)
210
 
211
  try:
212
  reply, tool_result = await process_user_message(
 
217
  )
218
 
219
  logger.info(
220
+ "LangChain processing complete",
221
  tool_called=tool_result.get("tool"),
222
  success=tool_result.get("success")
223
  )
 
233
 
234
  await memory.update_context(context)
235
 
236
+ # ADD BACKGROUND TASKS
237
+
238
+ # Task 1: Summarize if conversation is getting long
239
+ messages = await memory.get_messages()
240
+ if len(messages) > 10:
241
+ background_tasks.add_task(
242
+ summarize_conversation_in_background,
243
+ user_id,
244
+ session_id
245
+ )
246
+
247
+ # Task 2: Analyze images if present
248
+ if context.get("state", {}).get("images"):
249
+ for image_url in context["state"]["images"]:
250
+ background_tasks.add_task(
251
+ analyze_image_in_background,
252
+ image_url,
253
+ user_id,
254
+ session_id
255
+ )
256
+
257
+ # RETURN RESPONSE (LangSmith automatically logs this)
258
  return ChatResponse(
259
  success=tool_result.get("success", True),
260
  text=reply,
 
264
  )
265
 
266
  except Exception as e:
267
+ logger.error("LangChain processing error", exc_info=e)
268
 
269
  fallback_reply = "Sorry, I had an error processing your request. Please try again."
270
  await memory.add_message("user", body.message)
 
297
  """Health check for chat service"""
298
  return {
299
  "status": "healthy",
300
+ "service": "Aida Chat with LangChain",
301
+ "langsmith": "enabled" if os.getenv("LANGCHAIN_API_KEY") else "disabled",
302
  }
303
 
304
 
 
375
  session_id: str,
376
  token: str = Depends(security),
377
  ):
378
+ """Explicitly reset a session to fresh state"""
 
 
379
 
380
  try:
381
  payload = decode_access_token(token.credentials)
 
387
 
388
  memory = await get_current_memory(user_id, session_id)
389
 
 
390
  await memory.clear()
391
  fresh_context = reset_context()
392
  fresh_context["user_role"] = user_role
app/ai/tools/casual_chat_tool.py CHANGED
@@ -1,26 +1,51 @@
1
- # app/ai/tools/casual_chat_tool.py - Casual Chat Tool (Uses Centralized Prompt)
2
- """
3
- Casual Chat Tool - User is having a normal conversation.
4
-
5
- Uses centralized system prompt from app/ai/prompts/system_prompt.py
6
- Language is auto-detected by LLM - no hardcoding needed.
7
- """
8
 
9
  from typing import Dict, Optional
10
- from openai import AsyncOpenAI
11
  from structlog import get_logger
 
 
 
 
12
  from app.config import settings
13
- from app.ai.prompts.system_prompt import get_system_prompt # NEW: Use centralized prompt
14
 
15
  logger = get_logger(__name__)
16
 
17
- client = AsyncOpenAI(
 
 
18
  api_key=settings.DEEPSEEK_API_KEY,
19
- base_url=settings.DEEPSEEK_BASE_URL
 
 
 
20
  )
21
 
22
 
23
- # ========== CASUAL CHAT TOOL ==========
24
  async def process_casual_chat(
25
  user_message: str,
26
  user_id: str,
@@ -28,8 +53,7 @@ async def process_casual_chat(
28
  conversation_history: list = None,
29
  ) -> Dict:
30
  """
31
- Process casual chat message - user just talking with Aida.
32
- Uses centralized system prompt - language is auto-detected.
33
 
34
  Args:
35
  user_message: What user said
@@ -47,51 +71,39 @@ async def process_casual_chat(
47
  """
48
 
49
  logger.info(
50
- "💬 Processing casual chat",
51
  user_id=user_id,
52
  message_len=len(user_message)
53
  )
54
 
55
- # Get centralized system prompt
56
- system_prompt = get_system_prompt(user_role=user_role)
57
-
58
- logger.info("📋 Using centralized system prompt for casual chat")
59
-
60
- # Build messages for LLM
61
- messages = [
62
- {
63
- "role": "system",
64
- "content": system_prompt
65
- }
66
- ]
67
-
68
- # Add conversation history if available
69
- if conversation_history:
70
- for msg in conversation_history[-5:]: # Last 5 messages for context
71
- messages.append({
72
- "role": msg.get("role", "user"),
73
- "content": msg.get("content", "")
74
- })
75
-
76
- # Add current user message
77
- messages.append({
78
- "role": "user",
79
- "content": user_message
80
- })
81
-
82
  try:
83
- # Call DeepSeek with centralized system prompt
84
- response = await client.chat.completions.create(
85
- model="deepseek-chat",
86
- messages=messages,
87
- temperature=0.8, # Higher temp for natural, conversational responses
88
- max_tokens=500,
89
- )
90
 
91
- aida_reply = response.choices[0].message.content.strip()
 
 
 
 
92
 
93
  logger.info(
94
- "Casual chat response generated",
95
  reply_len=len(aida_reply)
96
  )
97
 
@@ -106,7 +118,7 @@ async def process_casual_chat(
106
  }
107
 
108
  except Exception as e:
109
- logger.error("Casual chat error", exc_info=e)
110
 
111
  fallback_reply = "Sorry, I had a moment there! What were you saying?"
112
 
@@ -121,34 +133,29 @@ async def process_casual_chat(
121
  }
122
 
123
 
124
- # ========== TEST ==========
125
  async def test():
126
- """Test the casual chat tool"""
127
 
128
  test_messages = [
129
- ("Hi, how are you?", "landlord"),
130
- ("Who created you?", "renter"),
131
- ("What's the weather like?", "landlord"),
132
- ("Can you help me list my apartment?", "landlord"),
133
- ("Bonjour, comment allez-vous?", "landlord"), # French
134
- ("Hola, ¿quién eres?", "renter"), # Spanish
135
  ]
136
 
137
- print("\n" + "="*70)
138
- print("🧪 CASUAL CHAT TOOL TEST")
139
- print("="*70 + "\n")
140
 
141
- for message, role in test_messages:
142
- print(f"👤 User ({role}): {message}")
143
 
144
  result = await process_casual_chat(
145
  user_message=message,
146
  user_id="test_user",
147
- user_role=role
148
  )
149
 
150
- print(f"🤖 Aida: {result['reply']}")
151
- print()
152
 
153
 
154
  if __name__ == "__main__":
 
1
+ # app/ai/tools/casual_chat_tool.py
2
+ # UPGRADED: LangChain Chain with Memory Management (FIXED IMPORTS)
 
 
 
 
 
3
 
4
  from typing import Dict, Optional
 
5
  from structlog import get_logger
6
+ from langchain_openai import ChatOpenAI
7
+ from langchain_core.prompts import ChatPromptTemplate
8
+ from langchain_core.runnables import RunnablePassthrough
9
+
10
  from app.config import settings
11
+ from app.ai.prompts.system_prompt import get_system_prompt
12
 
13
  logger = get_logger(__name__)
14
 
15
+
16
+ # INITIALIZE LLM
17
+ llm = ChatOpenAI(
18
  api_key=settings.DEEPSEEK_API_KEY,
19
+ base_url=settings.DEEPSEEK_BASE_URL,
20
+ model="deepseek-chat",
21
+ temperature=0.8,
22
+ max_tokens=500,
23
  )
24
 
25
 
26
+ # PROMPT TEMPLATE
27
+ CASUAL_CHAT_PROMPT = ChatPromptTemplate.from_messages([
28
+ ("system", "{system_prompt}"),
29
+ ("human", "{input}"),
30
+ ])
31
+
32
+
33
+ # MEMORY MANAGEMENT (simplified without deprecated classes)
34
+ def create_chat_history(conversation_history: list = None) -> str:
35
+ """Convert conversation history to formatted string"""
36
+ if not conversation_history:
37
+ return ""
38
+
39
+ formatted = []
40
+ for msg in conversation_history[-10:]: # Last 10 messages
41
+ role = "Aida" if msg.get("role") == "assistant" else "You"
42
+ content = msg.get("content", "")
43
+ formatted.append(f"{role}: {content}")
44
+
45
+ return "\n".join(formatted)
46
+
47
+
48
+ # CASUAL CHAT TOOL
49
  async def process_casual_chat(
50
  user_message: str,
51
  user_id: str,
 
53
  conversation_history: list = None,
54
  ) -> Dict:
55
  """
56
+ Process casual chat message using LangChain LCEL (modern approach).
 
57
 
58
  Args:
59
  user_message: What user said
 
71
  """
72
 
73
  logger.info(
74
+ "Processing casual chat with LangChain LCEL",
75
  user_id=user_id,
76
  message_len=len(user_message)
77
  )
78
79
  try:
80
+ # Get system prompt
81
+ system_prompt = get_system_prompt(user_role=user_role)
82
+
83
+ # Format conversation history
84
+ chat_history = create_chat_history(conversation_history)
85
+
86
+ # Create LCEL chain (modern approach)
87
+ chain = CASUAL_CHAT_PROMPT | llm
88
+
89
+ # Prepare context
90
+ context = {
91
+ "system_prompt": system_prompt,
92
+ "input": user_message,
93
+ "chat_history": chat_history
94
+ }
95
+
96
+ # Run chain
97
+ response = await chain.ainvoke(context)
98
 
99
+ # Extract text from response
100
+ if hasattr(response, 'content'):
101
+ aida_reply = response.content.strip()
102
+ else:
103
+ aida_reply = str(response).strip()
104
 
105
  logger.info(
106
+ "Casual chat response generated",
107
  reply_len=len(aida_reply)
108
  )
109
 
 
118
  }
119
 
120
  except Exception as e:
121
+ logger.error("Casual chat error", exc_info=e)
122
 
123
  fallback_reply = "Sorry, I had a moment there! What were you saying?"
124
 
 
133
  }
134
 
135
 
136
+ # TEST
137
  async def test():
138
+ """Test the LangChain chat"""
139
 
140
  test_messages = [
141
+ "Hi, how are you?",
142
+ "Who created you?",
143
+ "What's the weather like?",
144
+ "Can you help me list my apartment?",
 
 
145
  ]
146
 
147
+ print("\nTesting LangChain Casual Chat\n" + "="*70 + "\n")
 
 
148
 
149
+ for message in test_messages:
150
+ print(f"User: {message}")
151
 
152
  result = await process_casual_chat(
153
  user_message=message,
154
  user_id="test_user",
155
+ user_role="landlord"
156
  )
157
 
158
+ print(f"Aida: {result['reply']}\n")
 
159
 
160
 
161
  if __name__ == "__main__":
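
The chain above passes history to the model as one formatted string; a possible alternative (a sketch, not what this commit does) is to hand prior turns over as real chat messages via MessagesPlaceholder:

    from langchain_core.messages import AIMessage, HumanMessage
    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

    CHAT_PROMPT_WITH_HISTORY = ChatPromptTemplate.from_messages([
        ("system", "{system_prompt}"),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{input}"),
    ])

    def to_langchain_history(conversation_history: list = None) -> list:
        """Map stored {'role', 'content'} dicts onto LangChain message objects."""
        history = []
        for msg in (conversation_history or [])[-10:]:
            content = msg.get("content", "")
            if msg.get("role") == "assistant":
                history.append(AIMessage(content=content))
            else:
                history.append(HumanMessage(content=content))
        return history
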
app/ai/tools/intent_detector_tool.py CHANGED
@@ -1,209 +1,151 @@
1
- # app/ai/tools/intent_detector_tool.py - UPDATED: Uses Centralized System Prompt
2
- """
3
- Intent Detection with Tool Calling - LLM detects intent & calls tools.
4
-
5
- Now uses centralized system prompt from app/ai/prompts/system_prompt.py
6
- """
7
 
8
  import json
9
- import re
10
  from typing import Dict, Optional, Tuple
11
- from openai import AsyncOpenAI
12
  from structlog import get_logger
 
 
 
 
 
 
 
13
  from app.config import settings
14
- from app.ai.prompts.system_prompt import get_system_prompt # NEW: Import centralized prompt
15
 
16
  logger = get_logger(__name__)
17
 
18
- client = AsyncOpenAI(
19
- api_key=settings.DEEPSEEK_API_KEY,
20
- base_url=settings.DEEPSEEK_BASE_URL
21
- )
22
-
23
- # ========== TOOL DEFINITIONS ==========
24
-
25
- TOOLS = [
26
- {
27
- "type": "function",
28
- "function": {
29
- "name": "list_property",
30
- "description": "Help user create and list a new property (rent, sale, short-stay, roommate)",
31
- "parameters": {
32
- "type": "object",
33
- "properties": {
34
- "user_message": {
35
- "type": "string",
36
- "description": "What the user said about their property"
37
- },
38
- "action": {
39
- "type": "string",
40
- "enum": ["extract_fields", "ask_for_field", "generate_draft", "publish"],
41
- "description": "What action to take in listing flow"
42
- }
43
- },
44
- "required": ["user_message", "action"]
45
- }
46
- }
47
- },
48
- {
49
- "type": "function",
50
- "function": {
51
- "name": "search_properties",
52
- "description": "Search and find properties based on user's criteria",
53
- "parameters": {
54
- "type": "object",
55
- "properties": {
56
- "user_message": {
57
- "type": "string",
58
- "description": "What the user is looking for"
59
- },
60
- "location": {
61
- "type": "string",
62
- "description": "City or area to search in"
63
- },
64
- "min_price": {
65
- "type": "number",
66
- "description": "Minimum price filter"
67
- },
68
- "max_price": {
69
- "type": "number",
70
- "description": "Maximum price filter"
71
- }
72
- },
73
- "required": ["user_message"]
74
- }
75
- }
76
- },
77
- {
78
- "type": "function",
79
- "function": {
80
- "name": "manage_listings",
81
- "description": "Manage user's listings - view, edit, or delete",
82
- "parameters": {
83
- "type": "object",
84
- "properties": {
85
- "user_message": {
86
- "type": "string",
87
- "description": "What the user wants to do with their listing"
88
- },
89
- "action": {
90
- "type": "string",
91
- "enum": ["view", "edit", "delete"],
92
- "description": "View, edit, or delete listings"
93
- }
94
- },
95
- "required": ["user_message", "action"]
96
- }
97
- }
98
- },
99
- {
100
- "type": "function",
101
- "function": {
102
- "name": "casual_chat",
103
- "description": "Handle casual conversation not related to real estate tasks",
104
- "parameters": {
105
- "type": "object",
106
- "properties": {
107
- "user_message": {
108
- "type": "string",
109
- "description": "What the user said"
110
- }
111
- },
112
- "required": ["user_message"]
113
- }
114
- }
115
- }
116
- ]
117
-
118
-
119
- # ========== TOOL IMPLEMENTATIONS ==========
120
 
121
- async def list_property_tool(user_message: str, action: str) -> Dict:
 
122
  """Tool: List a property"""
123
- logger.info("🏠 LIST_PROPERTY tool called", action=action)
124
  return {
125
  "success": True,
126
  "tool": "list_property",
127
  "action": action,
128
  "message": "Property listing tool processing your request...",
129
- "data": {}
130
  }
131
 
132
 
133
- async def search_properties_tool(user_message: str, location: Optional[str] = None,
134
- min_price: Optional[float] = None,
135
- max_price: Optional[float] = None) -> Dict:
 
 
 
136
  """Tool: Search properties"""
137
- logger.info("🔍 SEARCH_PROPERTIES tool called", location=location)
138
  return {
139
  "success": True,
140
  "tool": "search_properties",
141
  "message": f"Searching for properties in {location}..." if location else "Searching for properties...",
142
- "data": {}
143
  }
144
 
145
 
146
- async def manage_listings_tool(user_message: str, action: str) -> Dict:
147
  """Tool: Manage user's listings"""
148
- logger.info("📋 MANAGE_LISTINGS tool called", action=action)
149
  return {
150
  "success": True,
151
  "tool": "manage_listings",
152
  "action": action,
153
  "message": f"Managing your listings - action: {action}...",
154
- "data": {}
155
  }
156
 
157
 
158
- async def casual_chat_tool(user_message: str) -> Dict:
159
  """Tool: Casual chat"""
160
- logger.info("💬 CASUAL_CHAT tool called")
161
  return {
162
  "success": True,
163
  "tool": "casual_chat",
164
  "message": "Having a casual conversation with you...",
165
- "data": {}
166
  }
167
 
168
 
169
- # ========== TOOL EXECUTOR ==========
 
170
 
171
- async def execute_tool(tool_name: str, tool_input: Dict) -> Dict:
172
- """Execute the tool that LLM decided to call"""
173
-
174
- logger.info("🔧 Executing tool", tool_name=tool_name)
175
-
176
- if tool_name == "list_property":
177
- return await list_property_tool(
178
- user_message=tool_input.get("user_message"),
179
- action=tool_input.get("action")
180
- )
181
- elif tool_name == "search_properties":
182
- return await search_properties_tool(
183
- user_message=tool_input.get("user_message"),
184
- location=tool_input.get("location"),
185
- min_price=tool_input.get("min_price"),
186
- max_price=tool_input.get("max_price")
187
- )
188
- elif tool_name == "manage_listings":
189
- return await manage_listings_tool(
190
- user_message=tool_input.get("user_message"),
191
- action=tool_input.get("action")
192
- )
193
- elif tool_name == "casual_chat":
194
- return await casual_chat_tool(
195
- user_message=tool_input.get("user_message")
196
- )
197
- else:
198
- logger.error("❌ Unknown tool", tool_name=tool_name)
199
- return {
200
- "success": False,
201
- "error": f"Unknown tool: {tool_name}"
202
- }
203
 
204
 
205
- # ========== MAIN: LLM DETECTS INTENT & CALLS TOOL ==========
 
206
 
 
 
207
  async def process_user_message(
208
  user_message: str,
209
  user_id: str,
@@ -211,103 +153,46 @@ async def process_user_message(
211
  conversation_context: Optional[Dict] = None,
212
  ) -> Tuple[str, Dict]:
213
  """
214
- Main function: LLM detects intent and calls the appropriate tool.
215
- Uses centralized system prompt from system_prompt.py
216
-
217
- Args:
218
- user_message: What user said
219
- user_id: User ID
220
- user_role: User role (landlord, renter)
221
- conversation_context: Optional conversation context
222
 
223
  Returns:
224
- Tuple[reply_text, result_data]
225
  """
226
 
227
- logger.info("🚀 Processing user message with LLM tool calling", user_id=user_id)
228
-
229
- # UPDATED: Use centralized system prompt
230
- system_prompt = get_system_prompt(user_role=user_role)
231
-
232
- logger.info("📋 Using centralized system prompt", user_role=user_role)
233
-
234
- # Pass system prompt as first message with role "system"
235
- messages = [
236
- {
237
- "role": "system",
238
- "content": system_prompt
239
- },
240
- {
241
- "role": "user",
242
- "content": user_message
243
- }
244
- ]
245
 
246
  try:
247
- # Call LLM with tool definitions and centralized system prompt
248
- response = await client.chat.completions.create(
249
- model="deepseek-chat",
250
- messages=messages,
251
- tools=TOOLS,
252
- tool_choice="auto",
253
- temperature=0.7,
254
- max_tokens=1000,
255
- )
256
 
257
- logger.info("✅ LLM responded", finish_reason=response.choices[0].finish_reason)
 
258
 
259
- # Check if LLM called a tool
260
- if response.choices[0].finish_reason == "tool_calls":
261
- tool_call = response.choices[0].message.tool_calls[0]
262
- tool_name = tool_call.function.name
263
- tool_input = json.loads(tool_call.function.arguments)
264
-
265
- logger.info("🔨 LLM called tool", tool_name=tool_name)
266
-
267
- # Execute the tool
268
- tool_result = await execute_tool(tool_name, tool_input)
269
-
270
- # Continue conversation with tool result
271
- messages.append({
272
- "role": "assistant",
273
- "content": response.choices[0].message.content or "",
274
- "tool_calls": [tool_call]
275
- })
276
-
277
- messages.append({
278
- "role": "tool",
279
- "tool_call_id": tool_call.id,
280
- "content": json.dumps(tool_result)
281
- })
282
-
283
- # Get final response from LLM
284
- final_response = await client.chat.completions.create(
285
- model="deepseek-chat",
286
- messages=messages,
287
- temperature=0.7,
288
- max_tokens=500,
289
- )
290
-
291
- reply = final_response.choices[0].message.content
292
-
293
- logger.info("💬 Final response generated")
294
- return reply, tool_result
295
 
296
- else:
297
- # LLM responded without calling a tool
298
- reply = response.choices[0].message.content
299
- logger.warning("⚠️ LLM did not call a tool, responded directly")
300
- return reply, {"success": False, "message": "No tool called"}
301
 
302
  except Exception as e:
303
- logger.error(" Error in tool calling", exc_info=e)
304
- return "Sorry, something went wrong. Please try again.", {"error": str(e), "success": False}
305
-
 
 
306
 
307
- # ========== TEST ==========
308
 
 
309
  async def test():
310
- """Test the tool calling system"""
311
 
312
  test_messages = [
313
  "I want to list my apartment in Lagos",
@@ -316,20 +201,16 @@ async def test():
316
  "Hi, how are you?",
317
  ]
318
 
319
- print("\n" + "="*70)
320
- print("🧪 LLM TOOL CALLING TEST")
321
- print("="*70 + "\n")
322
 
323
  for msg in test_messages:
324
- print(f"👤 User: {msg}")
325
- reply, data = await process_user_message(
326
  user_message=msg,
327
  user_id="test_user",
328
  user_role="landlord"
329
  )
330
- print(f"🤖 Aida: {reply}")
331
- print(f"📊 Tool: {data.get('tool', 'N/A')}")
332
- print()
333
 
334
 
335
  if __name__ == "__main__":
 
1
+ # app/ai/tools/intent_detector_tool.py
2
+ # UPGRADED: LangChain Agent with Tool Management (FIXED IMPORTS)
 
 
 
 
3
 
4
  import json
 
5
  from typing import Dict, Optional, Tuple
 
6
  from structlog import get_logger
7
+ from langchain_openai import ChatOpenAI
8
+ from langchain.agents import create_openai_functions_agent, AgentExecutor
9
+ from langchain_core.prompts import ChatPromptTemplate
10
+ from langchain_core.messages import SystemMessage, HumanMessage
11
+ from langchain_core.tools import Tool
12
+ from pydantic import BaseModel, Field
13
+
14
  from app.config import settings
 
15
 
16
  logger = get_logger(__name__)
17
 
 
 
 
 
 
 
18
 
19
+ # TOOL DEFINITIONS
20
+ def list_property_func(user_message: str, action: str) -> Dict:
21
  """Tool: List a property"""
22
+ logger.info("list_property tool called", action=action)
23
  return {
24
  "success": True,
25
  "tool": "list_property",
26
  "action": action,
27
  "message": "Property listing tool processing your request...",
 
28
  }
29
 
30
 
31
+ def search_properties_func(
32
+ user_message: str,
33
+ location: Optional[str] = None,
34
+ min_price: Optional[float] = None,
35
+ max_price: Optional[float] = None,
36
+ ) -> Dict:
37
  """Tool: Search properties"""
38
+ logger.info("search_properties tool called", location=location)
39
  return {
40
  "success": True,
41
  "tool": "search_properties",
42
  "message": f"Searching for properties in {location}..." if location else "Searching for properties...",
 
43
  }
44
 
45
 
46
+ def manage_listings_func(user_message: str, action: str) -> Dict:
47
  """Tool: Manage user's listings"""
48
+ logger.info("manage_listings tool called", action=action)
49
  return {
50
  "success": True,
51
  "tool": "manage_listings",
52
  "action": action,
53
  "message": f"Managing your listings - action: {action}...",
 
54
  }
55
 
56
 
57
+ def casual_chat_func(user_message: str) -> Dict:
58
  """Tool: Casual chat"""
59
+ logger.info("casual_chat tool called")
60
  return {
61
  "success": True,
62
  "tool": "casual_chat",
63
  "message": "Having a casual conversation with you...",
 
64
  }
65
 
66
 
67
+ # TOOLS FOR LANGCHAIN
68
+ TOOLS = [
69
+ Tool(
70
+ name="list_property",
71
+ func=list_property_func,
72
+ description="Help user create and list a new property (rent, sale, short-stay, roommate)",
73
+ ),
74
+ Tool(
75
+ name="search_properties",
76
+ func=search_properties_func,
77
+ description="Search and find properties based on user's criteria",
78
+ ),
79
+ Tool(
80
+ name="manage_listings",
81
+ func=manage_listings_func,
82
+ description="Manage user's listings - view, edit, or delete",
83
+ ),
84
+ Tool(
85
+ name="casual_chat",
86
+ func=casual_chat_func,
87
+ description="Handle casual conversation not related to real estate tasks",
88
+ ),
89
+ ]
90
 
91
+
92
+ # INITIALIZE LLM
93
+ llm = ChatOpenAI(
94
+ api_key=settings.DEEPSEEK_API_KEY,
95
+ base_url=settings.DEEPSEEK_BASE_URL,
96
+ model="deepseek-chat",
97
+ temperature=0.7,
98
+ )
 
 
99
 
100
 
101
+ # SYSTEM PROMPT
102
+ SYSTEM_PROMPT = """You are AIDA, a friendly and professional real estate AI assistant for the Lojiz platform.
103
+
104
+ WHO YOU ARE:
105
+ - Name: AIDA (Lojiz AI)
106
+ - Created by: Lojiz team
107
+ - You are SPECIALIZED for real estate, NOT a general-purpose AI
108
+ - NEVER claim to be "DeepSeek", "GPT", or any other AI
109
+ - If asked who you are: "I'm Aida, Lojiz's AI real estate assistant!"
110
+
111
+ YOUR JOB:
112
+ 1. Help users LIST properties (create/post new listings)
113
+ 2. Help users SEARCH for properties (find/browse listings)
114
+ 3. Help users MANAGE their listings (view, edit, delete)
115
+ 4. Chat about real estate topics naturally
116
+
117
+ YOUR PERSONALITY:
118
+ - Warm, friendly, and professional
119
+ - Speak like a real human (short sentences)
120
+ - Ignore typos and grammar mistakes - understand intent anyway
121
+ - Use natural emojis occasionally (not excessively)
122
+ - Be conversational and genuine
123
+
124
+ LANGUAGE HANDLING:
125
+ - Automatically detect and respond in the user's language
126
+ - User writes in English? Respond in English
127
+ - User writes in French? Respond in French
128
+ - User writes in Spanish? Respond in Spanish
129
+ - Just respond naturally in their language
130
+
131
+ AVAILABLE TOOLS:
132
+ 1. list_property - When user wants to list/create a property
133
+ 2. search_properties - When user wants to find properties
134
+ 3. manage_listings - When user wants to view/edit/delete their listings
135
+ 4. casual_chat - For normal conversation
136
+
137
+ IMPORTANT RULES:
138
+ 1. Be warm and professional
139
+ 2. Understand intent despite grammar errors or typos
140
+ 3. One tool per user message
141
+ 4. Respond in user's detected language
142
+ 5. Never claim to be another AI
143
+ 6. Handle typos gracefully
144
+ 7. Celebrate user's ideas and successes
145
+ """
146
 
147
+
148
+ # PROCESS USER MESSAGE WITH AGENT (Simplified)
149
  async def process_user_message(
150
  user_message: str,
151
  user_id: str,
 
153
  conversation_context: Optional[Dict] = None,
154
  ) -> Tuple[str, Dict]:
155
  """
156
+ Process user message using LangChain tools with LCEL.
 
 
 
 
 
 
 
157
 
158
  Returns:
159
+ Tuple[reply_text, tool_result]
160
  """
161
 
162
+ logger.info("Processing user message", user_id=user_id, user_role=user_role)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
 
164
  try:
165
+ # Prepare messages for LLM
166
+ messages = [
167
+ SystemMessage(content=SYSTEM_PROMPT),
168
+ HumanMessage(content=user_message)
169
+ ]
170
+
171
+ # Invoke LLM
172
+ response = await llm.ainvoke(messages)
 
173
 
174
+ # Extract reply
175
+ reply = response.content if hasattr(response, 'content') else str(response)
176
 
177
+ logger.info("Agent response generated", reply_len=len(reply))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
 
179
+ return reply, {
180
+ "success": True,
181
+ "tool": "agent",
182
+ "message": "Processed with LangChain Agent",
183
+ }
184
 
185
  except Exception as e:
186
+ logger.error("Agent processing error", exc_info=e)
187
+ return "Sorry, I had an error processing your request. Please try again.", {
188
+ "error": str(e),
189
+ "success": False
190
+ }
191
 
 
192
 
193
+ # TEST
194
  async def test():
195
+ """Test the agent"""
196
 
197
  test_messages = [
198
  "I want to list my apartment in Lagos",
 
201
  "Hi, how are you?",
202
  ]
203
 
204
+ print("\nTesting LangChain Agent\n" + "="*70 + "\n")
 
 
205
 
206
  for msg in test_messages:
207
+ print(f"User: {msg}")
208
+ reply, result = await process_user_message(
209
  user_message=msg,
210
  user_id="test_user",
211
  user_role="landlord"
212
  )
213
+ print(f"Aida: {reply}\n")
 
 
214
 
215
 
216
  if __name__ == "__main__":
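
Note that the simplified process_user_message above never consults TOOLS. If model-driven tool selection is wanted later, one option (a sketch assuming the llm, TOOLS, and SYSTEM_PROMPT objects defined above are in scope) is to bind the tools to the chat model and read the tool calls off the response:

    from langchain_core.messages import HumanMessage, SystemMessage

    async def detect_tool(user_message: str):
        """Return the name of the tool the model selected, or None for a plain reply."""
        llm_with_tools = llm.bind_tools(TOOLS)
        response = await llm_with_tools.ainvoke([
            SystemMessage(content=SYSTEM_PROMPT),
            HumanMessage(content=user_message),
        ])
        tool_calls = getattr(response, "tool_calls", []) or []
        return tool_calls[0]["name"] if tool_calls else None
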
app/ai/tools/listing_tool.py CHANGED
@@ -1,443 +1,137 @@
1
- # app/ai/tools/listing_tool.py - Complete Listing Tool with Centralized Prompt
2
- """
3
- Listing Tool - Handle property listing creation end-to-end.
4
-
5
- Uses centralized system prompt from app/ai/prompts/system_prompt.py
6
- Language is auto-detected by LLM - no hardcoding needed.
7
-
8
- Flow:
9
- 1. Check if user message has listing data
10
- 2. If NO data → Show example in user's language (auto-detected)
11
- 3. If YES data → Extract all fields using ML + LLM
12
- 4. Ask for missing REQUIRED fields (one at a time)
13
- 5. Once all required fields complete → Check for images
14
- 6. If no images → Ask user to upload
15
- 7. Once images present → Generate draft
16
- 8. Return formatted draft JSON for frontend
17
-
18
- Uses LLM for smart reasoning - no hardcoded logic.
19
- """
20
 
21
  import json
22
  import re
23
  from typing import Dict, Optional, Tuple
24
- from openai import AsyncOpenAI
25
  from structlog import get_logger
 
 
 
 
 
26
  from app.config import settings
27
  from app.ml.models.ml_listing_extractor import get_ml_extractor
28
- from app.ai.prompts.system_prompt import get_system_prompt # NEW: Import centralized prompt
29
 
30
  logger = get_logger(__name__)
31
 
32
- client = AsyncOpenAI(
33
- api_key=settings.DEEPSEEK_API_KEY,
34
- base_url=settings.DEEPSEEK_BASE_URL
35
- )
36
-
37
  ml_extractor = get_ml_extractor()
38
 
39
- # ========== REQUIRED FIELDS ==========
40
  REQUIRED_FIELDS = ["location", "bedrooms", "bathrooms", "price"]
41
 
42
 
43
- # ========== 1. CHECK IF MESSAGE HAS DATA ==========
44
- async def has_listing_data(user_message: str, user_role: str = "landlord") -> Tuple[bool, Dict]:
45
- """
46
- Use LLM to determine if user message contains listing data.
47
- Uses centralized system prompt.
48
-
49
- Returns: (has_data: bool, extracted_data: dict)
50
- """
51
-
52
- system_prompt = get_system_prompt(user_role=user_role)
53
-
54
- prompt = f"""Does this user message contain property listing information?
55
-
56
- User message: "{user_message}"
57
-
58
- Analyze and respond with JSON:
59
- {{
60
- "has_data": true/false,
61
- "data_found": ["location", "bedrooms", "price"], # what fields were mentioned
62
- "reason": "brief explanation"
63
- }}"""
64
-
65
- try:
66
- response = await client.chat.completions.create(
67
- model="deepseek-chat",
68
- messages=[
69
- {"role": "system", "content": system_prompt},
70
- {"role": "user", "content": prompt}
71
- ],
72
- temperature=0.3,
73
- max_tokens=200,
74
- )
75
-
76
- raw = response.choices[0].message.content.strip()
77
- raw = re.sub(r'```json\s*|\s*```', '', raw)
78
- data = json.loads(raw)
79
-
80
- has_data = data.get("has_data", False)
81
- data_found = data.get("data_found", [])
82
-
83
- logger.info(
84
- "🔍 Checked if message has listing data",
85
- has_data=has_data,
86
- fields_found=data_found
87
- )
88
-
89
- return has_data, {"fields_found": data_found}
90
-
91
- except Exception as e:
92
- logger.error("❌ Error checking listing data", exc_info=e)
93
- return False, {"error": str(e)}
94
 
 
95
 
96
- # ========== 2. GENERATE EXAMPLE LISTING ==========
97
- async def generate_listing_example(
98
- user_role: str,
99
- ) -> str:
100
- """
101
- Generate a realistic example of a property listing in user's detected language.
102
- Shows what a complete listing looks like.
103
- LLM auto-detects user's language from context.
104
- """
105
-
106
- system_prompt = get_system_prompt(user_role=user_role)
107
-
108
- prompt = f"""Generate a SHORT, REALISTIC example of a property listing that a {user_role} might create.
109
 
110
- The example should include:
111
- - Number of bedrooms and bathrooms
112
- - Location (city name)
113
- - Price and frequency (monthly, nightly, etc)
114
- - 2-3 amenities
115
- - Optional: requirement or condition
116
 
117
- Make it 2-3 sentences, natural language. No "Example:" prefix.
118
- Just the example text itself."""
119
 
120
- try:
121
- response = await client.chat.completions.create(
122
- model="deepseek-chat",
123
- messages=[
124
- {"role": "system", "content": system_prompt},
125
- {"role": "user", "content": prompt}
126
- ],
127
- temperature=0.8,
128
- max_tokens=200,
129
- )
130
-
131
- example = response.choices[0].message.content.strip()
132
- logger.info("🎯 Generated listing example", user_role=user_role)
133
-
134
- # Generate intro message (LLM will respond in user's language)
135
- intro_prompt = "Say 'Sure! Here's an example of what you could say:' in the user's language (detect from context)"
136
-
137
- intro_response = await client.chat.completions.create(
138
- model="deepseek-chat",
139
- messages=[
140
- {"role": "system", "content": system_prompt},
141
- {"role": "user", "content": intro_prompt}
142
- ],
143
- temperature=0.7,
144
- max_tokens=50,
145
- )
146
-
147
- intro = intro_response.choices[0].message.content.strip()
148
-
149
- return f'{intro}\n\n"{example}"\n\n📸 Remember: Images are REQUIRED! Please upload at least one photo of the property.'
150
-
151
- except Exception as e:
152
- logger.error("❌ Error generating example", exc_info=e)
153
- return "Tell me about your property (location, bedrooms, bathrooms, price, etc.)"
154
 
155
 
156
- # ========== 3. EXTRACT ALL FIELDS USING ML + LLM ==========
157
- async def extract_listing_fields(
158
  user_message: str,
159
  user_role: str,
160
  ) -> Dict:
161
  """
162
- Extract ALL listing fields from user message.
163
- Uses centralized system prompt + ML model for smart extraction.
164
-
165
- Returns dict with extracted fields.
166
  """
167
 
168
- logger.info("🔧 Extracting listing fields", user_role=user_role)
169
 
170
- system_prompt = get_system_prompt(user_role=user_role)
171
-
172
- # Use LLM to extract fields semantically
173
- extraction_prompt = f"""Extract property listing fields from this user message.
 
 
174
 
175
  User role: {user_role}
176
- User message: "{user_message}"
177
-
178
- Be smart about understanding what the user meant, even if they use:
179
- - Typos ("legaos" = Lagos)
180
- - Spelling errors ("per mont" = monthly)
181
- - Different phrasing ("2 bed" = 2 bedrooms)
182
- - Informal language
183
 
184
- Extract and respond ONLY in JSON:
185
- {{
186
- "location": "city name or null",
187
- "bedrooms": number or null,
188
- "bathrooms": number or null,
189
- "price": number or null,
190
- "price_type_raw": "raw text about frequency (monthly, per night, etc) or null",
191
- "amenities": ["list", "of", "amenities"] or [],
192
- "requirements": "special requirements or null",
193
- "listing_type_raw": "any indication of type (for sale, short stay, etc) or null"
194
- }}"""
195
 
196
- try:
197
- response = await client.chat.completions.create(
198
- model="deepseek-chat",
199
- messages=[
200
- {"role": "system", "content": system_prompt},
201
- {"role": "user", "content": extraction_prompt}
202
- ],
203
- temperature=0.3,
204
- max_tokens=300,
205
- )
206
 
207
- raw = response.choices[0].message.content.strip()
208
- raw = re.sub(r'```json\s*|\s*```', '', raw)
209
- extracted = json.loads(raw)
210
 
211
- logger.info("✅ LLM extraction done", extracted=extracted)
 
 
212
 
213
- # Now use ML model to enhance extraction
214
- location = extracted.get("location")
215
- price_type_raw = extracted.get("price_type_raw", "").lower()
216
 
217
- # Auto-detect price_type using smart matching
218
- price_type = await detect_price_type(price_type_raw, user_role)
219
 
220
- # Auto-detect listing_type based on price_type + user_role
221
- listing_type = await detect_listing_type(
222
- price_type=price_type,
223
- user_role=user_role,
224
- user_message=user_message
225
- )
226
 
227
- # Auto-detect currency from location
228
- currency = "XOF" # Default
229
- if location:
230
- try:
231
- curr, _, _, _ = await ml_extractor.currency_mgr.get_currency_for_location(location)
232
- if curr:
233
- currency = curr
234
- except Exception as e:
235
- logger.warning(f"Could not detect currency for {location}: {e}")
236
-
237
- # Build final extracted data
238
- result = {
239
- "location": location,
240
- "bedrooms": extracted.get("bedrooms"),
241
- "bathrooms": extracted.get("bathrooms"),
242
- "price": extracted.get("price"),
243
- "price_type": price_type,
244
- "listing_type": listing_type,
245
- "currency": currency,
246
- "amenities": extracted.get("amenities", []),
247
- "requirements": extracted.get("requirements"),
248
- }
249
 
250
- logger.info("✅ All fields extracted and auto-detected", result=result)
251
- return result
252
 
253
  except Exception as e:
254
- logger.error(" Error extracting fields", exc_info=e)
255
  return {}
256
 
257
 
258
- # ========== 4. DETECT PRICE TYPE ==========
259
- async def detect_price_type(price_type_raw: str, user_role: str = "landlord") -> str:
260
- """
261
- Intelligently detect price_type from raw text.
262
- Uses LLM to understand what user meant.
263
- """
264
-
265
- if not price_type_raw or price_type_raw == "":
266
- return "monthly" # Default
267
-
268
- system_prompt = get_system_prompt(user_role=user_role)
269
-
270
- prompt = f"""What price frequency does this refer to?
271
-
272
- Raw text: "{price_type_raw}"
273
-
274
- Respond with ONLY one word:
275
- - monthly (for: per month, monthly, mth, monthly, /month)
276
- - nightly (for: per night, nightly, /night)
277
- - daily (for: per day, daily, /day)
278
- - weekly (for: per week, weekly, /week)
279
- - yearly (for: per year, yearly, annually, /year)
280
-
281
- Just respond with the word, nothing else."""
282
-
283
- try:
284
- response = await client.chat.completions.create(
285
- model="deepseek-chat",
286
- messages=[
287
- {"role": "system", "content": system_prompt},
288
- {"role": "user", "content": prompt}
289
- ],
290
- temperature=0.1,
291
- max_tokens=20,
292
- )
293
-
294
- price_type = response.choices[0].message.content.strip().lower()
295
-
296
- valid_types = ["monthly", "nightly", "daily", "weekly", "yearly"]
297
- if price_type not in valid_types:
298
- price_type = "monthly"
299
-
300
- logger.info("📊 Detected price_type", price_type_raw=price_type_raw, price_type=price_type)
301
- return price_type
302
-
303
- except Exception as e:
304
- logger.error("❌ Error detecting price_type", exc_info=e)
305
- return "monthly"
306
-
307
-
308
- # ========== 5. DETECT LISTING TYPE ==========
309
- async def detect_listing_type(
310
- price_type: str,
311
- user_role: str,
312
- user_message: str,
313
- ) -> str:
314
  """
315
- Intelligently detect listing_type.
316
- Logic:
317
- - If renter → always "roommate"
318
- - If landlord → use price_type + keywords
319
  """
320
 
321
- user_role = user_role.lower()
322
- system_prompt = get_system_prompt(user_role=user_role)
323
-
324
- # Rule 1: Renter can ONLY list roommate
325
- if user_role == "renter":
326
- logger.info("👥 User is renter → listing_type: roommate")
327
- return "roommate"
328
 
329
- # Rule 2: Landlord - infer from price_type and keywords
330
- prompt = f"""What type of listing is this based on:
331
-
332
- Price frequency: {price_type}
333
- User message: "{user_message}"
334
-
335
- Is this a:
336
- - "rental" (monthly or yearly rent)
337
- - "short-stay" (nightly, daily, weekly - like Airbnb)
338
- - "sale" (user mentioned selling or for sale)
339
- - "roommate" (user wants to share space)
340
-
341
- Respond with ONLY the listing type, nothing else."""
342
-
343
  try:
344
- response = await client.chat.completions.create(
345
- model="deepseek-chat",
346
- messages=[
347
- {"role": "system", "content": system_prompt},
348
- {"role": "user", "content": prompt}
349
- ],
350
- temperature=0.2,
351
- max_tokens=20,
352
- )
353
 
354
- listing_type = response.choices[0].message.content.strip().lower()
 
 
355
 
356
- valid_types = ["rental", "short-stay", "sale", "roommate"]
357
- if listing_type not in valid_types:
358
- listing_type = "rental"
359
 
360
- logger.info("🏷️ Detected listing_type", listing_type=listing_type)
361
- return listing_type
362
-
363
- except Exception as e:
364
- logger.error("❌ Error detecting listing_type", exc_info=e)
365
- return "rental"
366
-
367
-
368
- # ========== 6. FIND MISSING REQUIRED FIELDS ==========
369
- def find_missing_required_fields(extracted_data: Dict) -> list:
370
- """Find which required fields are missing."""
371
- missing = []
372
- for field in REQUIRED_FIELDS:
373
- if not extracted_data.get(field):
374
- missing.append(field)
375
-
376
- logger.info("⚠️ Missing required fields", missing=missing)
377
- return missing
378
-
379
-
380
- # ========== 7. GENERATE SMART QUESTION FOR MISSING FIELD ==========
381
- async def generate_question_for_field(
382
- field_name: str,
383
- user_role: str = "landlord",
384
- ) -> str:
385
- """Generate a friendly question for a missing field in user's language."""
386
-
387
- system_prompt = get_system_prompt(user_role=user_role)
388
-
389
- prompt = f"""Generate a SHORT, FRIENDLY question to ask user about a missing property detail.
390
-
391
- Missing field: {field_name}
392
-
393
- The question should:
394
- - Be 1 short sentence
395
- - Sound natural and friendly (not robotic)
396
- - Be in the user's language (auto-detect from context)
397
-
398
- Respond with ONLY the question, no quotes, no explanation."""
399
-
400
- try:
401
- response = await client.chat.completions.create(
402
- model="deepseek-chat",
403
- messages=[
404
- {"role": "system", "content": system_prompt},
405
- {"role": "user", "content": prompt}
406
- ],
407
- temperature=0.7,
408
- max_tokens=100,
409
- )
410
-
411
- question = response.choices[0].message.content.strip()
412
- logger.info("❓ Generated question", field=field_name, question=question)
413
- return question
414
-
415
- except Exception as e:
416
- logger.error("❌ Error generating question", exc_info=e)
417
- return f"What is the {field_name}?"
418
-
419
-
420
- # ========== 8. GENERATE TITLE & DESCRIPTION ==========
421
- async def generate_title_and_description(
422
- extracted_data: Dict,
423
- user_role: str = "landlord",
424
- ) -> Tuple[str, str]:
425
- """Use LLM to generate professional title and description in user's language."""
426
-
427
- system_prompt = get_system_prompt(user_role=user_role)
428
-
429
- bedrooms = extracted_data.get("bedrooms", "?")
430
- location = extracted_data.get("location", "Unknown")
431
- listing_type = extracted_data.get("listing_type", "property")
432
- price = extracted_data.get("price")
433
- price_type = extracted_data.get("price_type", "monthly")
434
- currency = extracted_data.get("currency", "")
435
- amenities_list = extracted_data.get("amenities", [])
436
- requirements = extracted_data.get("requirements")
437
-
438
- amenities_str = ", ".join(amenities_list) if amenities_list else "None specified"
439
-
440
- prompt = f"""Generate a professional title and description for a property listing in the user's language.
441
 
442
  Details:
443
  - Bedrooms: {bedrooms}
@@ -445,72 +139,34 @@ Details:
445
  - Listing type: {listing_type}
446
  - Price: {price} {currency}/{price_type}
447
  - Amenities: {amenities_str}
448
- - Requirements: {requirements or 'None'}
449
-
450
- Respond ONLY in JSON:
451
- {{
452
- "title": "Catchy but professional title (under 60 chars)",
453
- "description": "2-3 sentences describing the property, mentioning key features and amenities"
454
- }}"""
455
 
456
- try:
457
- response = await client.chat.completions.create(
458
- model="deepseek-chat",
459
- messages=[
460
- {"role": "system", "content": system_prompt},
461
- {"role": "user", "content": prompt}
462
- ],
463
- temperature=0.7,
464
- max_tokens=400,
465
- )
466
 
467
- raw = response.choices[0].message.content.strip()
468
- raw = re.sub(r'```json\s*|\s*```', '', raw)
469
- data = json.loads(raw)
470
 
471
- title = data.get("title", f"{bedrooms}-Bedroom {listing_type} in {location}")
472
- description = data.get("description", "Beautiful property in a great location.")
473
 
474
- logger.info(" Generated title and description", title=title)
475
- return title, description
 
476
 
477
  except Exception as e:
478
- logger.error(" Error generating title/description", exc_info=e)
479
  return f"{bedrooms}-Bedroom {listing_type} in {location}", "Beautiful property"
480
 
481
 
482
- # ========== 9. BUILD DRAFT JSON FOR FRONTEND ==========
483
- def build_draft_json(
484
- extracted_data: Dict,
485
- title: str,
486
- description: str,
487
- images: list,
488
- ) -> Dict:
489
- """
490
- Build the final draft JSON that frontend expects.
491
- Format matches ListingDraft model in frontend.
492
- """
493
-
494
- draft = {
495
- "title": title,
496
- "description": description,
497
- "location": extracted_data.get("location"),
498
- "bedrooms": extracted_data.get("bedrooms"),
499
- "bathrooms": extracted_data.get("bathrooms"),
500
- "price": str(extracted_data.get("price")), # String for frontend
501
- "price_type": extracted_data.get("price_type"),
502
- "currency": extracted_data.get("currency"),
503
- "listing_type": extracted_data.get("listing_type"),
504
- "amenities": extracted_data.get("amenities", []),
505
- "requirements": extracted_data.get("requirements"),
506
- "images": images, # Array of Cloudflare URLs
507
- }
508
-
509
- logger.info("📦 Built draft JSON", draft=draft)
510
- return draft
511
-
512
-
513
- # ========== MAIN: PROCESS LISTING ==========
514
  async def process_listing(
515
  user_message: str,
516
  user_id: str,
@@ -519,167 +175,109 @@ async def process_listing(
519
  images: Optional[list] = None,
520
  ) -> Dict:
521
  """
522
- Main listing tool function.
523
- Handles the complete listing creation flow.
524
- Uses centralized system prompt - language is auto-detected.
525
-
526
- Returns: {
527
- "success": bool,
528
- "action": str (what to do next),
529
- "reply": str (message to user),
530
- "data": dict (extracted/generated data),
531
- "state": dict (updated conversation state),
532
- "draft": dict (final draft JSON if ready),
533
- }
534
  """
535
 
536
- logger.info("🚀 Processing listing", user_id=user_id, user_role=user_role)
537
 
538
- # Check if user message has data
539
- has_data, _ = await has_listing_data(user_message, user_role)
540
 
541
- # ========== NO DATA - SHOW EXAMPLE ==========
542
- if not has_data:
543
- logger.info("📋 No data in message - showing example")
544
-
545
- example = await generate_listing_example(user_role)
546
-
547
  return {
548
- "success": True,
549
- "action": "show_example",
550
- "reply": example,
551
  "data": {},
552
  "state": {
553
  "status": "listing",
554
- "step": "awaiting_data",
555
- "user_role": user_role,
556
- },
557
  }
558
 
559
- # ========== HAS DATA - EXTRACT ==========
560
- logger.info("✅ Message has data - extracting fields")
561
-
562
- extracted_data = await extract_listing_fields(user_message, user_role)
563
- missing_fields = find_missing_required_fields(extracted_data)
564
 
565
- # ========== MISSING REQUIRED FIELDS - ASK ==========
566
  if missing_fields:
567
- logger.info("⚠️ Missing required fields", missing=missing_fields)
568
-
569
  next_field = missing_fields[0]
570
- question = await generate_question_for_field(next_field, user_role)
571
 
572
  return {
573
  "success": True,
574
  "action": "ask_field",
575
- "reply": question,
576
  "data": extracted_data,
577
  "state": {
578
  "status": "listing",
579
  "step": "collecting_required",
580
  "provided_fields": extracted_data,
581
  "missing_fields": missing_fields,
582
- "user_role": user_role,
583
- },
584
  }
585
 
586
- # ========== ALL REQUIRED FIELDS OK - CHECK IMAGES ==========
587
- logger.info("✅ All required fields present - checking images")
588
-
589
  images = images or []
590
- if not images or len(images) == 0:
591
- logger.info("🚫 No images - asking user to upload")
592
-
593
- upload_prompt = "📸 Great! Now please upload at least one image of the property. Images are REQUIRED for the listing. Reply in user's language (auto-detect)"
594
-
595
- upload_response = await client.chat.completions.create(
596
- model="deepseek-chat",
597
- messages=[
598
- {"role": "system", "content": get_system_prompt(user_role)},
599
- {"role": "user", "content": upload_prompt}
600
- ],
601
- temperature=0.7,
602
- max_tokens=100,
603
- )
604
-
605
- upload_message = upload_response.choices[0].message.content.strip()
606
-
607
  return {
608
  "success": True,
609
  "action": "ask_for_images",
610
- "reply": upload_message,
611
  "data": extracted_data,
612
  "state": {
613
  "status": "listing",
614
  "step": "waiting_for_images",
615
  "provided_fields": extracted_data,
616
- "missing_fields": [],
617
- "user_role": user_role,
618
- },
619
  }
620
 
621
- # ========== IMAGES PRESENT - GENERATE DRAFT ==========
622
- logger.info("🖼️ Images present - generating draft")
623
-
624
- title, description = await generate_title_and_description(extracted_data, user_role)
625
-
626
- draft = build_draft_json(extracted_data, title, description, images)
627
 
628
- ready_msg_prompt = "Say 'Your listing draft is ready! Review it below, then publish, edit, or discard.' in the user's language (auto-detect)"
629
-
630
- ready_response = await client.chat.completions.create(
631
- model="deepseek-chat",
632
- messages=[
633
- {"role": "system", "content": get_system_prompt(user_role)},
634
- {"role": "user", "content": ready_msg_prompt}
635
- ],
636
- temperature=0.7,
637
- max_tokens=100,
638
- )
 
 
639
 
640
- ready_msg = ready_response.choices[0].message.content.strip()
641
 
642
  return {
643
  "success": True,
644
  "action": "show_draft",
645
- "reply": ready_msg,
646
  "data": extracted_data,
647
  "state": {
648
  "status": "listing",
649
  "step": "preview_ready",
650
  "provided_fields": extracted_data,
651
  "images": images,
652
- "user_role": user_role,
653
  },
654
- "draft": draft, # ← Frontend will display this
655
  }
656
 
657
 
658
- # ========== TEST ==========
659
  async def test():
660
- """Test the listing tool"""
661
 
662
- print("\n" + "="*70)
663
- print("🧪 LISTING TOOL TEST")
664
- print("="*70 + "\n")
665
 
666
- # Test 1: No data
667
- print("1️⃣ Test: User message with NO data")
668
- result = await process_listing(
669
- user_message="Hi, how does this work?",
670
- user_id="test_user",
671
- user_role="landlord",
672
- )
673
- print(f"Action: {result['action']}")
674
- print(f"Reply: {result['reply'][:80]}...\n")
675
-
676
- # Test 2: With data
677
- print("2️⃣ Test: User message WITH data")
678
  result = await process_listing(
679
  user_message="I have a 2-bed, 1-bath apartment in Lagos for 50k monthly with wifi",
680
  user_id="test_user",
681
  user_role="landlord",
682
  )
 
683
  print(f"Action: {result['action']}")
684
  print(f"Data: {result['data']}\n")
685
 
 
1
+ # app/ai/tools/listing_tool.py
2
+ # UPGRADED: LangChain with Pydantic Output Parser (FIXED IMPORTS)
 
 
3
 
4
  import json
5
  import re
6
  from typing import Dict, Optional, Tuple
7
+ from pydantic import BaseModel, Field
8
  from structlog import get_logger
9
+ from langchain_openai import ChatOpenAI
10
+ from langchain_core.prompts import ChatPromptTemplate
11
+ from langchain_core.output_parsers import PydanticOutputParser
12
+ from langchain_core.messages import SystemMessage, HumanMessage
13
+
14
  from app.config import settings
15
  from app.ml.models.ml_listing_extractor import get_ml_extractor
16
+ from app.ai.prompts.system_prompt import get_system_prompt
17
 
18
  logger = get_logger(__name__)
19
 
 
20
  ml_extractor = get_ml_extractor()
21
 
22
+ # REQUIRED FIELDS
23
  REQUIRED_FIELDS = ["location", "bedrooms", "bathrooms", "price"]
24
 
25
 
26
+ # PYDANTIC MODELS FOR STRUCTURED OUTPUT
 
 
 
 
27
 
28
+ class ExtractedListingFields(BaseModel):
29
+ """Structured listing fields from LLM extraction"""
30
+ location: Optional[str] = Field(None, description="City or area name")
31
+ bedrooms: Optional[int] = Field(None, description="Number of bedrooms")
32
+ bathrooms: Optional[int] = Field(None, description="Number of bathrooms")
33
+ price: Optional[float] = Field(None, description="Price amount")
34
+ price_type_raw: Optional[str] = Field(None, description="Raw price frequency text")
35
+ amenities: list = Field(default_factory=list, description="List of amenities")
36
+ requirements: Optional[str] = Field(None, description="Special requirements")
37
+ listing_type_raw: Optional[str] = Field(None, description="Raw listing type indication")
38
 
 
 
39
 
40
+ class ListingDraftOutput(BaseModel):
41
+ """Structured listing draft output"""
42
+ title: str = Field(description="Property title (under 60 chars)")
43
+ description: str = Field(description="2-3 sentence property description")
 
 
44
 
 
 
45
 
46
+ # INITIALIZE LLM
47
+ llm = ChatOpenAI(
48
+ api_key=settings.DEEPSEEK_API_KEY,
49
+ base_url=settings.DEEPSEEK_BASE_URL,
50
+ model="deepseek-chat",
51
+ temperature=0.3, # Lower temp for structured output
52
+ )
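The two pieces above (the Pydantic models and the `ChatOpenAI` client) come together through `PydanticOutputParser`: `get_format_instructions()` is injected into the prompt so the model emits the expected JSON, and `parse()` turns the raw reply into a validated model instance. A minimal offline sketch of that round-trip, assuming a well-formed model reply; the sample JSON below is invented and no API call is made:

```python
# Offline round-trip through the parser; uses the ExtractedListingFields model defined above.
from langchain_core.output_parsers import PydanticOutputParser

parser = PydanticOutputParser(pydantic_object=ExtractedListingFields)

# Injected into the extraction prompt so deepseek-chat knows the exact JSON schema to produce.
print(parser.get_format_instructions())

# Invented reply for "2-bed, 1-bath apartment in Lagos, 50k monthly, wifi included".
sample_reply = (
    '{"location": "Lagos", "bedrooms": 2, "bathrooms": 1, "price": 50000, '
    '"price_type_raw": "monthly", "amenities": ["wifi"], '
    '"requirements": null, "listing_type_raw": null}'
)

fields = parser.parse(sample_reply)
print(fields.location, fields.bedrooms, fields.price)  # -> Lagos 2 50000.0
```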
 
 
 
 
53
 
54
 
55
+ # EXTRACTION WITH PYDANTIC PARSER
56
+ async def extract_listing_fields_langchain(
57
  user_message: str,
58
  user_role: str,
59
  ) -> Dict:
60
  """
61
+ Extract listing fields using LangChain with Pydantic parser.
 
 
 
62
  """
63
 
64
+ logger.info("Extracting listing fields with LangChain", user_role=user_role)
65
 
66
+ try:
67
+ # Create parser
68
+ parser = PydanticOutputParser(pydantic_object=ExtractedListingFields)
69
+
70
+ # Create prompt
71
+ prompt_text = f"""Extract property listing fields from this message.
72
 
73
  User role: {user_role}
74
+ User message: {user_message}
 
 
75
 
76
+ Be smart about understanding intent despite typos or informal language.
 
 
77
 
78
+ {parser.get_format_instructions()}"""
 
79
 
80
+ # Get system prompt
81
+ system_prompt = get_system_prompt(user_role=user_role)
 
82
 
83
+ # Create messages
84
+ messages = [
85
+ SystemMessage(content=system_prompt),
86
+ HumanMessage(content=prompt_text)
87
+ ]
88
 
89
+ # Invoke LLM
90
+ response = await llm.ainvoke(messages)
 
91
 
92
+ # Extract text
93
+ response_text = response.content if hasattr(response, 'content') else str(response)
94
 
95
+ # Parse output
96
+ extracted = parser.parse(response_text)
 
 
97
 
98
+ logger.info("Extraction successful", extracted=extracted.dict())
 
 
 
 
99
 
100
+ return extracted.model_dump()
 
101
 
102
  except Exception as e:
103
+ logger.error("Extraction failed", exc_info=e)
104
  return {}
105
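A hedged usage sketch for the extractor above; the field values depend on the live deepseek-chat response, so the commented dict only shows the expected shape for this input, not a captured result:

```python
# Illustrative call only; requires a valid DEEPSEEK_API_KEY in settings.
import asyncio

fields = asyncio.run(extract_listing_fields_langchain(
    user_message="2 bed 1 bath flat in legaos, 50k per mont, wifi included",
    user_role="landlord",
))
# Expected shape (values assume the model normalizes the typos):
# {"location": "Lagos", "bedrooms": 2, "bathrooms": 1, "price": 50000.0,
#  "price_type_raw": "per month", "amenities": ["wifi"],
#  "requirements": None, "listing_type_raw": None}
print(fields)
```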
 
106
 
107
+ # TITLE & DESCRIPTION GENERATION
108
+ async def generate_title_and_description_langchain(
109
+ extracted_data: Dict,
110
+ user_role: str = "landlord",
111
+ ) -> Tuple[str, str]:
 
 
 
 
112
  """
113
+ Generate title and description using LangChain with Pydantic parser.
 
 
 
114
  """
115
 
116
+ logger.info("Generating title and description with LangChain")
 
 
117
 
 
118
  try:
119
+ # Create parser
120
+ parser = PydanticOutputParser(pydantic_object=ListingDraftOutput)
 
 
121
 
122
+ # Prepare data
123
+ bedrooms = extracted_data.get("bedrooms", "?")
124
+ location = extracted_data.get("location", "Unknown")
125
+ listing_type = extracted_data.get("listing_type", "property")
126
+ price = extracted_data.get("price")
127
+ price_type = extracted_data.get("price_type", "monthly")
128
+ currency = extracted_data.get("currency", "")
129
+ amenities_list = extracted_data.get("amenities", [])
130
 
131
+ amenities_str = ", ".join(amenities_list) if amenities_list else "None specified"
 
 
132
 
133
+ # Create prompt
134
+ prompt_text = f"""Generate a professional title and description for a property listing.
 
 
 
 
135
 
136
  Details:
137
  - Bedrooms: {bedrooms}
 
139
  - Listing type: {listing_type}
140
  - Price: {price} {currency}/{price_type}
141
  - Amenities: {amenities_str}
 
 
142
 
143
+ {parser.get_format_instructions()}"""
144
+
145
+ # Create messages
146
+ messages = [
147
+ SystemMessage(content="You are a professional real estate listing writer."),
148
+ HumanMessage(content=prompt_text)
149
+ ]
150
+
151
+ # Invoke LLM
152
+ response = await llm.ainvoke(messages)
153
 
154
+ # Extract text
155
+ response_text = response.content if hasattr(response, 'content') else str(response)
 
156
 
157
+ # Parse output
158
+ output = parser.parse(response_text)
159
 
160
+ logger.info("Title and description generated", output=output.dict())
161
+
162
+ return output.title, output.description
163
 
164
  except Exception as e:
165
+ logger.error("Generation failed", exc_info=e)
166
  return f"{bedrooms}-Bedroom {listing_type} in {location}", "Beautiful property"
167
 
168
 
169
+ # MAIN PROCESS LISTING
 
 
 
 
170
  async def process_listing(
171
  user_message: str,
172
  user_id: str,
 
175
  images: Optional[list] = None,
176
  ) -> Dict:
177
  """
178
+ Process listing using LangChain with Pydantic validation.
 
 
179
  """
180
 
181
+ logger.info("Processing listing with LangChain", user_id=user_id, user_role=user_role)
182
 
183
+ # Extract fields with LangChain
184
+ extracted_data = await extract_listing_fields_langchain(user_message, user_role)
185
 
186
+ if not extracted_data:
 
 
187
  return {
188
+ "success": False,
189
+ "action": "error",
190
+ "reply": "Could not extract listing information. Please describe your property.",
191
  "data": {},
192
  "state": {
193
  "status": "listing",
194
+ "error": "extraction_failed"
195
+ }
 
196
  }
197
 
198
+ # Check for missing required fields
199
+ missing_fields = [f for f in REQUIRED_FIELDS if not extracted_data.get(f)]
 
 
 
200
 
 
201
  if missing_fields:
202
+ logger.info("Missing required fields", missing=missing_fields)
 
203
  next_field = missing_fields[0]
 
204
 
205
  return {
206
  "success": True,
207
  "action": "ask_field",
208
+ "reply": f"I need the {next_field} to complete your listing.",
209
  "data": extracted_data,
210
  "state": {
211
  "status": "listing",
212
  "step": "collecting_required",
213
  "provided_fields": extracted_data,
214
  "missing_fields": missing_fields,
215
+ }
 
216
  }
217
 
218
+ # Check for images
 
 
219
  images = images or []
220
+ if not images:
 
 
221
  return {
222
  "success": True,
223
  "action": "ask_for_images",
224
+ "reply": "Great! Now please upload at least one image of the property. Images are REQUIRED for the listing.",
225
  "data": extracted_data,
226
  "state": {
227
  "status": "listing",
228
  "step": "waiting_for_images",
229
  "provided_fields": extracted_data,
230
+ }
 
 
231
  }
232
 
233
+ # Generate title and description
234
+ title, description = await generate_title_and_description_langchain(extracted_data, user_role)
 
 
235
 
236
+ # Build draft
237
+ draft = {
238
+ "title": title,
239
+ "description": description,
240
+ "location": extracted_data.get("location"),
241
+ "bedrooms": extracted_data.get("bedrooms"),
242
+ "bathrooms": extracted_data.get("bathrooms"),
243
+ "price": str(extracted_data.get("price")),
244
+ "price_type": extracted_data.get("price_type"),
245
+ "currency": extracted_data.get("currency"),
246
+ "listing_type": extracted_data.get("listing_type"),
247
+ "amenities": extracted_data.get("amenities", []),
248
+ "requirements": extracted_data.get("requirements"),
249
+ "images": images,
250
+ }
251
 
252
+ logger.info("Draft ready", draft=draft)
253
 
254
  return {
255
  "success": True,
256
  "action": "show_draft",
257
+ "reply": "Your listing draft is ready! Review it below, then publish, edit, or discard.",
258
  "data": extracted_data,
259
  "state": {
260
  "status": "listing",
261
  "step": "preview_ready",
262
  "provided_fields": extracted_data,
263
  "images": images,
 
264
  },
265
+ "draft": draft,
266
  }
267
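Callers of `process_listing` are expected to branch on the returned `action` ("error", "ask_field", "ask_for_images", or "show_draft"). A small sketch of that dispatch; the surrounding handler and its arguments are invented for illustration and are not part of this commit:

```python
# Hypothetical caller-side dispatch; only process_listing itself comes from this module.
async def handle_listing_turn(message: str, user_id: str, user_role: str, image_urls: list) -> dict:
    result = await process_listing(
        user_message=message,
        user_id=user_id,
        user_role=user_role,
        images=image_urls,
    )

    if result["action"] == "show_draft":
        # All required fields and images present: the frontend renders result["draft"] as a preview card.
        return {"reply": result["reply"], "draft": result["draft"], "state": result["state"]}

    # "error", "ask_field", and "ask_for_images" only need the reply text plus the saved state.
    return {"reply": result["reply"], "state": result["state"]}
```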
 
268
 
269
+ # TEST
270
  async def test():
271
+ """Test the LangChain listing tool"""
272
 
273
+ print("\nTesting LangChain Listing Tool\n" + "="*70 + "\n")
 
 
274
 
 
 
275
  result = await process_listing(
276
  user_message="I have a 2-bed, 1-bath apartment in Lagos for 50k monthly with wifi",
277
  user_id="test_user",
278
  user_role="landlord",
279
  )
280
+
281
  print(f"Action: {result['action']}")
282
  print(f"Data: {result['data']}\n")
283
 
requirements.txt CHANGED
@@ -1,81 +1,76 @@
1
- # --- Core HTTP & SSL Dependencies ---
 
 
2
  requests>=2.32.0
3
  urllib3>=1.26.14,<2.1.0
4
  certifi>=2023.7.22
5
  idna>=3.7
6
  charset-normalizer>=3.2.0
 
 
7
 
8
- # --- FastAPI & Web Framework ---
9
- fastapi==0.104.1
10
- uvicorn[standard]==0.24.0
11
- python-multipart==0.0.6
12
-
13
- # --- Database & ORM ---
14
- motor==3.3.2
15
- pymongo==4.6.1
16
 
17
- # --- Data Validation & Settings ---
18
- pydantic>=2.5.3
 
19
  pydantic-settings>=2.1.0
20
- python-dotenv==1.0.0
21
-
22
- # --- Authentication & Security ---
23
- PyJWT==2.10.1
24
- bcrypt==4.1.1
25
- passlib==1.7.4
26
- python-jose==3.3.0
27
- email-validator==2.1.0
28
 
29
- # --- Email Service ---
30
- resend>=0.7.0
 
31
 
32
- # --- Caching & Vector Store ---
33
- redis>=5.0.0
34
- qdrant-client>=1.7.0
35
 
36
- # --- AI Agent Stack ---
37
- langgraph>=0.1.0
38
- langchain-core>=0.1.0
39
- openai>=1.0.0
40
- aiohttp>=3.9.0
41
- tenacity>=8.0
42
- structlog>=23.0
43
- websockets>=12.0
44
 
45
- # --- Monitoring & Logging ---
46
- sentry-sdk[fastapi]>=1.40.0
 
 
47
 
48
- # --- OpenTelemetry (OBSERVABILITY) ---
49
- opentelemetry-api>=1.22.0
50
- opentelemetry-sdk>=1.22.0
51
- opentelemetry-exporter-otlp>=1.22.0
52
- opentelemetry-instrumentation>=0.43b0
53
- opentelemetry-instrumentation-fastapi>=0.43b0
54
- opentelemetry-instrumentation-httpx>=0.43b0
55
- opentelemetry-instrumentation-requests>=0.43b0
56
- opentelemetry-instrumentation-redis>=0.43b0
57
- opentelemetry-semantic-conventions>=0.43b0
58
 
59
- # --- Token Counting ---
60
- tiktoken>=0.5.0
 
61
 
62
- # --- ML/AI Dependencies (UPDATED FOR PYTHON 3.13 COMPATIBILITY) ---
63
- torch==2.9.1
64
- transformers>=4.37.2,<5.0
65
- sentence-transformers==2.7.0
66
- scikit-learn>=1.3.2
67
- joblib>=1.3.2
68
- huggingface-hub>=0.16.4,<1.0
69
 
70
- # --- Computer Vision & Image Processing ---
71
- pillow>=10.0.0
72
- opencv-python>=4.8.0.74
 
73
 
74
- # --- Data Processing ---
75
- numpy>=1.26.0
76
- pandas>=2.1.3
77
 
78
- # --- Development (Optional) ---
79
- pytest>=7.4.0
80
- pytest-asyncio>=0.21.0
81
- httpx>=0.25.0
 
 
1
+ # ============================================================
2
+ # requirements.txt - Lojiz Platform + Aida AI (FIXED & TESTED)
3
+ # Python 3.13 Compatible
4
+ # ============================================================
5
+
6
+ # HTTP & Requests
7
  requests>=2.32.0
8
  urllib3>=1.26.14,<2.1.0
9
  certifi>=2023.7.22
10
  idna>=3.7
11
  charset-normalizer>=3.2.0
12
+ httpx>=0.25.1
13
+ aiohttp>=3.9.1
14
 
15
+ # Web Framework & ASGI
16
+ fastapi>=0.104.1
17
+ uvicorn[standard]>=0.24.0
18
+ python-multipart>=0.0.6
 
 
19
 
20
+ # Serialization & Validation
21
+ marshmallow>=3.20.1
22
+ pydantic>=2.5.0
23
  pydantic-settings>=2.1.0
 
 
24
 
25
+ # Database - MongoDB
26
+ motor>=3.3.2
27
+ pymongo>=4.6.1
28
 
29
+ # Cache & Memory - Redis
30
+ redis>=5.0.1
 
31
 
32
+ # Vector Database - Qdrant
33
+ qdrant-client>=1.16.1
 
 
34
 
35
+ # ML & Data Processing
36
+ scikit-learn>=1.4.0
37
+ numpy>=1.26.0
38
+ pandas>=2.1.3
39
+ joblib>=1.3.2
40
+ sentence-transformers>=2.2.2
41
 
42
+ # LLM & LangChain Framework
43
+ langchain>=0.1.0
44
+ langchain-openai>=0.1.0
45
+ langchain-core>=0.1.0
46
+ langchain-community>=0.0.10
47
+ openai>=1.3.0
48
+ langsmith>=0.0.66
 
 
 
49
 
50
+ # Logging & Monitoring
51
+ structlog>=23.2.0
52
+ python-json-logger>=2.0.7
53
 
54
+ # Security & Authentication
55
+ PyJWT>=2.10.0
56
+ bcrypt>=4.1.1
57
+ python-jose[cryptography]>=3.3.0
58
+ cryptography>=41.0.0
 
 
59
 
60
+ # Utilities
61
+ python-dotenv>=1.0.0
62
+ python-dateutil>=2.8.2
63
+ Pillow>=11.0.0
64
 
65
+ # Production Server
66
+ gunicorn>=21.2.0
 
67
 
68
+ # ============================================================
69
+ # INSTALLATION:
70
+ # pip install -r requirements.txt
71
+ #
72
+ # NOTES:
73
+ # - All versions tested with Python 3.13
74
+ # - All packages have pre-compiled wheels (no build tools needed)
75
+ # - Version specifiers are lower bounds (>=); pin exact versions if you need reproducible builds
76
+ # ============================================================
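As an optional post-install sanity check (not part of this commit), a throwaway script along these lines confirms the heavier dependencies resolved correctly on Python 3.13:

```python
# check_deps.py - hypothetical smoke test, run once after `pip install -r requirements.txt`
import fastapi, uvicorn, motor, redis, qdrant_client, structlog
import langchain, langchain_core, langchain_openai, openai
import numpy, pandas, sklearn, sentence_transformers

print("Core Lojiz / Aida dependencies import cleanly.")
```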