Spaces: Running

Commit 15213e6
Parent(s): 9cfd5d9
agentic

Changed files:
- app.py +15 -4
- app/ai/config.py +246 -50
- app/ai/intent_detector.py +249 -0
- app/ai/memory/redis_context_memory.py +462 -0
- app/ai/modules/draft_preview.py +377 -0
- app/ai/modules/image_upload.py +377 -0
- app/ai/modules/post_draft_handler.py +498 -0
- app/ai/modules/property_listing.py +547 -0
- app/ai/nodes/__init__.py +0 -0
- app/ai/nodes/__pycache__/__init__.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/__init__.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/discard_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/draft_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/draft_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/edit_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/image_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/intent_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/intent_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/publish_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/publish_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/role_gate_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/role_gate_node.cpython-313.pyc +0 -0
- app/ai/nodes/__pycache__/search_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/search_node.cpython-313.pyc +0 -0
- app/ai/nodes/discard_node.py +0 -19
- app/ai/nodes/draft_node.py +0 -275
- app/ai/nodes/edit_node.py +0 -36
- app/ai/nodes/image_node.py +0 -63
- app/ai/nodes/intent_node.py +0 -423
- app/ai/nodes/publish_node.py +0 -53
- app/ai/nodes/role_gate_node.py +0 -51
- app/ai/nodes/search_node.py +0 -355
- app/ai/routes/chat.py +436 -65
- app/ai/service.py +0 -90
- app/ai/services/dynamic_role_manager.py +0 -90
- app/ai/services/property_listing_service.py +470 -0
- app/ai/state.py +0 -53
- main.py +227 -115

app.py
CHANGED

@@ -1,10 +1,12 @@
+# app.py - Entry point for Hugging Face Spaces deployment
 """
-Runs the FastAPI server
+Entry point for HF Spaces deployment
+Runs the FastAPI server with all new Aida modules
 """

 import sys
 import os
+import logging

 # Add current directory to path for imports
 sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

@@ -13,7 +15,11 @@ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
 from dotenv import load_dotenv
 load_dotenv()

-#
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Import FastAPI app
 from main import app

 if __name__ == "__main__":

@@ -23,7 +29,12 @@ if __name__ == "__main__":
     port = int(os.getenv("PORT", 7860))
     host = "0.0.0.0"  # Listen on all interfaces

+    logger.info("=" * 70)
+    logger.info("Starting Lojiz Platform + Aida AI")
+    logger.info(f"   Host: {host}")
+    logger.info(f"   Port: {port}")
+    logger.info(f"   Environment: {os.getenv('ENVIRONMENT', 'development')}")
+    logger.info("=" * 70)

     uvicorn.run(
         app,
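
For local development the same entry point can be launched by hand. A minimal sketch of the equivalent manual launch, assuming the Space's dependencies are installed; the reload flag is an assumption for local use and is not part of the commit:

# Local launch sketch (hypothetical): mirrors what app.py does under __main__.
import os
import uvicorn

if __name__ == "__main__":
    port = int(os.getenv("PORT", 7860))  # same default port as app.py
    uvicorn.run("main:app", host="0.0.0.0", port=port, reload=True)  # reload=True is a local-dev assumption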

app/ai/config.py
CHANGED

@@ -1,66 +1,262 @@

Removed (old Sentry setup):
-        traces_sample_rate=0.2,
-        profiles_sample_rate=0.2,
-        environment=os.getenv("ENVIRONMENT", "production"),
-        # ✅ FIXED: Don't auto-enable all integrations
-        # ✅ FIXED: Only use what we need
-        integrations=[],  # Let Sentry choose safe defaults
-    )
-    logger.info("✅ Sentry AI observability enabled")

New version:

# app/ai/config.py - Updated with all new modules
"""
Configuration and initialization for Aida AI system with all new modules:
- Intent detection
- Property listing service
- Image upload (Cloudflare)
- Draft & preview
- Redis memory
- Post-draft handling
"""

import os
import logging
from redis.asyncio import Redis
from qdrant_client import AsyncQdrantClient
from openai import AsyncOpenAI
from structlog import get_logger
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

logger = get_logger(__name__)

# ========== ENVIRONMENT VARIABLES ==========
DEBUG = os.getenv("DEBUG", "False").lower() == "true"
ENVIRONMENT = os.getenv("ENVIRONMENT", "development")

# ========== DEEPSEEK (LLM) ==========
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
DEEPSEEK_BASE_URL = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")

if not DEEPSEEK_API_KEY:
    raise RuntimeError("DEEPSEEK_API_KEY not set in .env")

deepseek_client = AsyncOpenAI(api_key=DEEPSEEK_API_KEY, base_url=DEEPSEEK_BASE_URL)

logger.info("✅ DeepSeek client initialized")

# ========== REDIS ==========
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")

try:
    redis_client = Redis.from_url(
        REDIS_URL,
        decode_responses=True,
        socket_connect_timeout=5,
        socket_timeout=5,
        max_connections=50,
    )
    logger.info("✅ Redis client configured", url=REDIS_URL[:30] + "...")
except Exception as e:
    logger.error("❌ Redis configuration failed", exc_info=e)
    redis_client = None

# ========== QDRANT (Vector Store) ==========
QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")

try:
    qdrant_client = AsyncQdrantClient(
        url=QDRANT_URL,
        api_key=QDRANT_API_KEY,
        https=True if "https" in QDRANT_URL else False,
        timeout=60,
    )
    logger.info("✅ Qdrant client configured", url=QDRANT_URL[:30] + "...")
except Exception as e:
    logger.error("❌ Qdrant configuration failed", exc_info=e)
    qdrant_client = None

# ========== CLOUDFLARE IMAGES ==========
CF_ACCOUNT_ID = os.getenv("CF_ACCOUNT_ID")
CF_API_TOKEN = os.getenv("CF_API_TOKEN")

if not CF_ACCOUNT_ID or not CF_API_TOKEN:
    logger.warning("⚠️ Cloudflare credentials not set - image upload will fail")

logger.info("✅ Cloudflare configured", account_id=CF_ACCOUNT_ID[:10] + "..." if CF_ACCOUNT_ID else "Not set")

# ========== OPENROUTER (Embeddings) ==========
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

if not OPENROUTER_API_KEY:
    logger.warning("⚠️ OpenRouter API key not set - embeddings will fail")

logger.info("✅ OpenRouter configured" if OPENROUTER_API_KEY else "⚠️ OpenRouter not configured")

# ========== ML EXTRACTOR ==========
try:
    from app.ml.models.ml_listing_extractor import get_ml_extractor

    ml_extractor = get_ml_extractor()
    logger.info("✅ ML Extractor initialized")
except Exception as e:
    logger.warning(f"⚠️ ML Extractor initialization failed: {e}")
    ml_extractor = None

# ========== MEMORY MANAGERS ==========
try:
    from app.ai.memory.redis_context_memory import get_memory_manager

    memory_manager = get_memory_manager()
    logger.info("✅ Redis Memory Manager initialized")
except Exception as e:
    logger.error(f"❌ Memory Manager initialization failed: {e}")
    memory_manager = None

# ========== INTENT DETECTOR ==========
try:
    from app.ai.intent_detector import detect_intent_with_llm, build_smart_response

    logger.info("✅ Intent Detector loaded")
except Exception as e:
    logger.error(f"❌ Intent Detector initialization failed: {e}")

# ========== MODULES ==========
try:
    from app.ai.modules.property_listing import (
        start_listing,
        continue_listing,
        build_listing_state,
        generate_draft,
    )
    logger.info("✅ Property Listing module loaded")
except Exception as e:
    logger.error(f"❌ Property Listing module initialization failed: {e}")

try:
    from app.ai.modules.image_upload import (
        upload_image_to_cloudflare,
        process_image_upload,
        has_images,
        get_images_status,
    )
    logger.info("✅ Image Upload module loaded")
except Exception as e:
    logger.error(f"❌ Image Upload module initialization failed: {e}")

try:
    from app.ai.modules.draft_preview import (
        generate_and_show_preview,
        handle_edit_field,
        update_draft_field,
        publish_draft,
        discard_draft,
    )
    logger.info("✅ Draft & Preview module loaded")
except Exception as e:
    logger.error(f"❌ Draft & Preview module initialization failed: {e}")

try:
    from app.ai.modules.post_draft_handler import (
        handle_post_draft_action,
        detect_post_draft_action,
        detect_field_to_edit,
        publish_draft_to_mongodb,
    )
    logger.info("✅ Post-Draft Handler module loaded")
except Exception as e:
    logger.error(f"❌ Post-Draft Handler module initialization failed: {e}")

# ========== SERVICE ==========
try:
    from app.ai.services.property_listing_service import PropertyListingService

    listing_service = PropertyListingService()
    logger.info("✅ Property Listing Service initialized")
except Exception as e:
    logger.error(f"❌ Property Listing Service initialization failed: {e}")
    listing_service = None

# ========== ROUTES ==========
try:
    from app.ai.routes.chat import router as chat_router

    logger.info("✅ Chat routes loaded")
except Exception as e:
    logger.error(f"❌ Chat routes initialization failed: {e}")

# ========== STARTUP/SHUTDOWN CHECKS ==========
async def validate_ai_startup():
    """Validate AI system at startup"""

    logger.info("Validating AI system startup...")

    checks = {
        "DeepSeek": DEEPSEEK_API_KEY is not None,
        "Redis": redis_client is not None,
        "Qdrant": qdrant_client is not None,
        "Cloudflare": CF_ACCOUNT_ID is not None and CF_API_TOKEN is not None,
        "OpenRouter": OPENROUTER_API_KEY is not None,
        "ML Extractor": ml_extractor is not None,
        "Memory Manager": memory_manager is not None,
        "Listing Service": listing_service is not None,
    }

    failed = [name for name, status in checks.items() if not status]

    if failed:
        logger.warning(f"⚠️ Some components failed to initialize: {', '.join(failed)}")
    else:
        logger.info("✅ All AI components ready!")

    return checks

# ========== REDIS HEALTH CHECK ==========
async def check_redis_health():
    """Check Redis connection health"""

    try:
        if redis_client:
            await redis_client.ping()
            logger.info("✅ Redis is healthy")
            return True
    except Exception as e:
        logger.error("❌ Redis connection failed", exc_info=e)
        return False

# ========== QDRANT HEALTH CHECK ==========
async def check_qdrant_health():
    """Check Qdrant connection health"""

    try:
        if qdrant_client:
            await qdrant_client.get_collections()
            logger.info("✅ Qdrant is healthy")
            return True
    except Exception as e:
        logger.error("❌ Qdrant connection failed", exc_info=e)
        return False

# ========== SUMMARY ==========
"""
Configuration Summary:

External Services:
- DeepSeek (LLM): ✅
- Redis: ✅
- Qdrant (Vector DB): ✅
- Cloudflare Images: ✅
- OpenRouter (Embeddings): ✅

AI Modules:
- Intent Detector: ✅
- Property Listing: ✅
- Image Upload: ✅
- Draft & Preview: ✅
- Post-Draft Handler: ✅

Services:
- Property Listing Service: ✅

Memory:
- Redis Context Memory: ✅
- Memory Manager: ✅

Routes:
- Chat Routes: ✅

Ready to use!
"""
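
main.py also changes in this commit but its diff is not shown above. As a rough sketch of how these config helpers could be wired into the FastAPI app, the lifespan hook and the router prefix below are assumptions rather than content taken from the diff:

# Sketch only: hypothetical main.py excerpt consuming the config helpers.
from contextlib import asynccontextmanager
from fastapi import FastAPI

from app.ai.config import validate_ai_startup, check_redis_health, check_qdrant_health
from app.ai.routes.chat import router as chat_router

@asynccontextmanager
async def lifespan(app: FastAPI):
    # The checks log warnings instead of raising, so a partially configured Space still boots.
    await validate_ai_startup()
    await check_redis_health()
    await check_qdrant_health()
    yield

app = FastAPI(lifespan=lifespan)
app.include_router(chat_router, prefix="/ai")  # prefix is an assumption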

app/ai/intent_detector.py
ADDED

@@ -0,0 +1,249 @@

# app/ai/intent_detector.py - LLM-Based Intent Detection (Simple & Natural)
"""
Use LLM to understand intent - no hardcoding
Aida knows what she can do, LLM figures out what user wants
"""

import json
import re
from typing import Dict, Optional, Tuple
from openai import AsyncOpenAI
from structlog import get_logger
from app.config import settings

logger = get_logger(__name__)

client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url=settings.DEEPSEEK_BASE_URL)

# ========== WHAT AIDA CAN DO ==========
AIDA_CAPABILITIES = """
Aida can help users with:

1. **LIST_PROPERTY** - User wants to create/post/list a property for rent, short-stay, sale, or roommate
2. **SEARCH_PROPERTY** - User wants to find/search/browse properties
3. **VIEW_LISTINGS** - User wants to see their own posted properties
4. **EDIT_LISTING** - User wants to edit an existing listing
5. **DELETE_LISTING** - User wants to delete a listing
6. **PUBLISH_DRAFT** - User says "publish", "confirm", "yes", "ok" after seeing draft preview
7. **DISCARD_DRAFT** - User says "discard", "cancel", "delete" to abandon draft
8. **EDIT_FIELD** - User wants to change a specific field (price, location, amenities, etc) during listing creation
9. **CONFIRM_AMENITIES** - User provides amenities or requirements for a listing
10. **VOICE_CALL** - User wants to call/chat with a real estate agent
11. **CASUAL_CHAT** - User is having normal conversation, not related to above tasks

Also:
- Aida auto-detects user's language and responds in it
- Aida is friendly, natural, NOT robotic
- Aida can have normal conversation while still helping with real estate tasks
"""

# ========== SIMPLE LLM-BASED INTENT DETECTION ==========
async def detect_intent_with_llm(
    user_message: str,
    conversation_context: Optional[Dict] = None,
) -> Tuple[str, float, Dict]:
    """
    Use LLM to understand what user wants

    Returns:
    - intent: One of the capabilities above
    - confidence: 0.0-1.0
    - metadata: Language, extracted info, etc.
    """

    # Build context for LLM
    context_str = ""
    if conversation_context:
        status = conversation_context.get("status")
        draft_preview = conversation_context.get("draft_preview")
        editing_field = conversation_context.get("editing_field")

        if draft_preview:
            context_str = f"\n⚠️ IMPORTANT: User is currently viewing a DRAFT LISTING PREVIEW. So commands like 'publish', 'edit', 'discard' are post-draft actions, NOT new requests."

        if status == "editing" and editing_field:
            context_str += f"\n⚠️ User is currently editing the '{editing_field}' field. Whatever they say is the new value for that field."

    prompt = f"""You are Aida's intent analyzer. Your job is to understand what the user wants.

{AIDA_CAPABILITIES}

{context_str}

User message: "{user_message}"

Respond in ONLY valid JSON (no markdown, no explanation):
{{
    "intent": "one of the 11 intents above",
    "confidence": 0.0-1.0,
    "language": "detected language code (en, fr, es, pt, etc)",
    "explanation": "brief reason why you chose this intent"
}}

If unclear, pick CASUAL_CHAT. Always respond with valid JSON only."""

    try:
        response = await client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.3,  # More deterministic
            max_tokens=200,
        )

        raw = response.choices[0].message.content.strip()

        # Clean JSON if needed
        raw = re.sub(r'```json\s*|\s*```', '', raw)

        data = json.loads(raw)

        intent = data.get("intent", "CASUAL_CHAT")
        confidence = float(data.get("confidence", 0.5))
        language = data.get("language", "en")

        logger.info(
            "Intent detected by LLM",
            intent=intent,
            confidence=confidence,
            language=language,
            explanation=data.get("explanation", "")
        )

        return intent, confidence, {"language": language, "explanation": data.get("explanation")}

    except Exception as e:
        logger.error("❌ LLM intent detection failed", exc_info=e)
        # Fallback: treat as casual chat
        return "CASUAL_CHAT", 0.3, {"language": "en", "error": str(e)}


# ========== SIMPLE LLM-BASED RESPONSE GENERATION ==========
async def generate_aida_response(
    user_message: str,
    intent: str,
    confidence: float,
    conversation_context: Optional[Dict] = None,
    language: str = "en",
) -> str:
    """
    Use LLM to generate natural, friendly response from Aida

    Aida responds naturally - not robotic
    """

    # Build context
    context_str = ""
    if conversation_context:
        status = conversation_context.get("status")
        draft_preview = conversation_context.get("draft_preview")
        missing_fields = conversation_context.get("missing_fields", [])

        if status == "collecting":
            context_str = f"\nContext: Currently collecting property listing details. Missing fields: {missing_fields}. Last message from user seemed to provide some info."
        elif status == "asking_optional":
            context_str = "\nContext: User has provided all required fields. Now asking about optional amenities/requirements."
        elif draft_preview:
            context_str = "\nContext: Draft preview is showing. User is reviewing their listing before publishing."

    prompt = f"""You are Aida, a friendly real estate AI assistant on the Lojiz platform.

Current detected intent: {intent}
Confidence: {confidence:.0%}
User's language: {language}
{context_str}

User just wrote: "{user_message}"

Now respond as Aida would - naturally, friendly, helpful. NOT robotic.
- If collecting property info: Ask for next missing field naturally
- If showing draft: Ask if they want to publish, edit, or discard
- If casual chat: Have a friendly conversation while steering back to real estate if appropriate
- Match the user's language
- Be warm and personable

Your response (natural text, not JSON)."""

    try:
        response = await client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,  # More creative/natural
            max_tokens=300,
        )

        aida_response = response.choices[0].message.content.strip()

        logger.info("Generated Aida response", length=len(aida_response))
        return aida_response

    except Exception as e:
        logger.error("❌ LLM response generation failed", exc_info=e)
        # Fallback response
        fallbacks = {
            "en": "Sorry, I had a moment there. What were you saying?",
            "fr": "Désolé, je me suis perdu. Qu'aviez-vous dit?",
        }
        return fallbacks.get(language, "Sorry, what?")


# ========== COMPLETE FLOW ==========
async def process_user_message(
    user_message: str,
    conversation_context: Optional[Dict] = None,
) -> Dict:
    """
    Complete pipeline:
    1. Detect intent with LLM
    2. Generate natural response
    3. Return structured data
    """

    # Step 1: Detect intent
    intent, confidence, metadata = await detect_intent_with_llm(user_message, conversation_context)

    # Step 2: Generate response
    response = await generate_aida_response(
        user_message=user_message,
        intent=intent,
        confidence=confidence,
        conversation_context=conversation_context,
        language=metadata.get("language", "en"),
    )

    return {
        "intent": intent,
        "confidence": confidence,
        "language": metadata.get("language", "en"),
        "response": response,
        "explanation": metadata.get("explanation", ""),
    }


# ========== TEST ==========
if __name__ == "__main__":
    import asyncio

    async def test():
        test_messages = [
            "Hi! How are you?",
            "I want to list my apartment in Lagos",
            "Show me 2-bed apartments under 100k",
            "Edit the price to 60k",
            "Publish this",
            "What's your favorite food?",
        ]

        print("\n" + "="*70)
        print("AIDA INTENT DETECTION TEST")
        print("="*70 + "\n")

        for msg in test_messages:
            result = await process_user_message(msg)

            print(f"User: {msg}")
            print(f"Intent: {result['intent']} ({result['confidence']:.0%})")
            print(f"Language: {result['language']}")
            print(f"Aida: {result['response']}")
            print("-" * 70 + "\n")

    asyncio.run(test())
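
A minimal sketch of calling the detector directly, using the context keys the prompt builder actually reads (status, draft_preview, editing_field); the message and values are illustrative and DEEPSEEK_API_KEY must be configured:

# Hypothetical usage of detect_intent_with_llm with a post-draft editing context.
import asyncio
from app.ai.intent_detector import detect_intent_with_llm

async def demo():
    context = {
        "status": "editing",       # tells the prompt the next message is a field value
        "draft_preview": True,     # publish/edit/discard become post-draft actions
        "editing_field": "price",
    }
    intent, confidence, meta = await detect_intent_with_llm("make it 75k", context)
    print(intent, confidence, meta.get("language"))

if __name__ == "__main__":
    asyncio.run(demo())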

app/ai/memory/redis_context_memory.py
ADDED

@@ -0,0 +1,462 @@

# app/ai/memory/redis_context_memory.py - Redis-Based Context Memory
"""
Conversation memory management with Redis:
- Persists across server restarts
- Fast in-memory access
- Auto-expires old conversations (7 days)
- Stores: message history + conversation context
"""

import json
from typing import Dict, List, Optional
from datetime import datetime
from structlog import get_logger
from app.ai.config import redis_client

logger = get_logger(__name__)

# Redis key prefixes
HISTORY_PREFIX = "aida:history"  # {user_id}:{session_id}
CONTEXT_PREFIX = "aida:context"  # {user_id}:{session_id}
TTL = 60 * 60 * 24 * 7  # 7 days


# ========== REDIS CONVERSATION MEMORY ==========
class RedisConversationMemory:
    """
    Redis-based conversation memory

    Stores in Redis:
    - Message history: aida:history:{user_id}:{session_id}
    - Context: aida:context:{user_id}:{session_id}
    """

    def __init__(self, user_id: str, session_id: str):
        self.user_id = user_id
        self.session_id = session_id
        self.history_key = f"{HISTORY_PREFIX}:{user_id}:{session_id}"
        self.context_key = f"{CONTEXT_PREFIX}:{user_id}:{session_id}"

        logger.info("RedisConversationMemory created", user_id=user_id, session_id=session_id)

    # ========== ADD MESSAGE ==========
    async def add_message(
        self,
        role: str,  # "user" or "assistant"
        content: str,
        metadata: Optional[Dict] = None,
    ) -> None:
        """
        Add message to history in Redis
        """

        try:
            # Get current history
            history = await self._get_history_list()

            # Add new message
            message = {
                "role": role,
                "content": content,
                "timestamp": datetime.utcnow().isoformat(),
                "metadata": metadata or {},
            }

            history.append(message)

            # Save back to Redis
            await redis_client.setex(
                self.history_key,
                TTL,
                json.dumps(history, ensure_ascii=False),
            )

            logger.info(
                f"Added {role} message",
                user_id=self.user_id,
                session_id=self.session_id,
                total_messages=len(history)
            )

        except Exception as e:
            logger.error("❌ Failed to add message", exc_info=e)

    # ========== GET MESSAGE HISTORY (Internal) ==========
    async def _get_history_list(self) -> List[Dict]:
        """Get raw history list from Redis"""

        try:
            raw = await redis_client.get(self.history_key)
            if raw is None:
                return []
            return json.loads(raw)
        except Exception as e:
            logger.error("❌ Failed to get history", exc_info=e)
            return []

    # ========== GET MESSAGE HISTORY ==========
    async def get_messages(
        self,
        limit: Optional[int] = None,
    ) -> List[Dict]:
        """
        Get message history

        Args:
            limit: Optional limit of recent messages (e.g., last 10)
        """

        history = await self._get_history_list()

        if limit:
            return history[-limit:]
        return history

    # ========== GET FORMATTED HISTORY ==========
    async def get_formatted_history(self) -> str:
        """
        Get conversation history as formatted string for LLM context
        """

        messages = await self.get_messages()
        formatted = []

        for msg in messages:
            role = "User" if msg["role"] == "user" else "Aida"
            content = msg["content"]
            formatted.append(f"{role}: {content}")

        return "\n".join(formatted)

    # ========== GET CONTEXT ==========
    async def get_context(self) -> Dict:
        """Get current conversation context from Redis"""

        try:
            raw = await redis_client.get(self.context_key)
            if raw is None:
                # Return default context
                return {
                    "status": "idle",
                    "language": "en",
                    "user_role": None,
                    "draft": None,
                    "state": {},
                }
            return json.loads(raw)
        except Exception as e:
            logger.error("❌ Failed to get context", exc_info=e)
            return {}

    # ========== UPDATE CONTEXT ==========
    async def update_context(self, updates: Dict) -> None:
        """
        Update conversation context in Redis
        """

        try:
            # Get current context
            context = await self.get_context()

            # Update with new values
            context.update(updates)

            # Save back to Redis
            await redis_client.setex(
                self.context_key,
                TTL,
                json.dumps(context, ensure_ascii=False, default=str),
            )

            logger.info(
                "Updated context",
                user_id=self.user_id,
                session_id=self.session_id,
                keys=list(updates.keys())
            )

        except Exception as e:
            logger.error("❌ Failed to update context", exc_info=e)

    # ========== GET SUMMARY ==========
    async def get_summary(self) -> Dict:
        """Get conversation summary"""

        try:
            messages = await self.get_messages()
            context = await self.get_context()

            return {
                "user_id": self.user_id,
                "session_id": self.session_id,
                "total_messages": len(messages),
                "status": context.get("status", "idle"),
                "language": context.get("language", "en"),
                "has_draft": context.get("draft") is not None,
            }
        except Exception as e:
            logger.error("❌ Failed to get summary", exc_info=e)
            return {}

    # ========== CLEAR MEMORY ==========
    async def clear(self) -> None:
        """Clear conversation memory (start new chat)"""

        try:
            await redis_client.delete(self.history_key)
            await redis_client.delete(self.context_key)

            logger.info("Conversation memory cleared", user_id=self.user_id, session_id=self.session_id)
        except Exception as e:
            logger.error("❌ Failed to clear memory", exc_info=e)


# ========== REDIS MEMORY MANAGER ==========
class RedisMemoryManager:
    """
    Global manager for Redis-based conversations

    No need to store in-memory - Redis is the single source of truth
    """

    # ========== CREATE/GET SESSION ==========
    async def get_or_create_session(
        self,
        user_id: str,
        session_id: str,
    ) -> RedisConversationMemory:
        """
        Get or create conversation memory for a session

        With Redis, we always "create" (or retrieve from Redis)
        """

        memory = RedisConversationMemory(user_id, session_id)

        logger.info("✅ Session memory ready", user_id=user_id, session_id=session_id)
        return memory

    # ========== CLOSE SESSION ==========
    async def close_session(self, user_id: str, session_id: str) -> None:
        """
        Close/clear a session

        Optional - can let Redis auto-expire it (7 days)
        Or explicitly clear it
        """

        try:
            memory = RedisConversationMemory(user_id, session_id)
            await memory.clear()

            logger.info("Session closed", user_id=user_id, session_id=session_id)
        except Exception as e:
            logger.error("❌ Failed to close session", exc_info=e)

    # ========== GET USER SESSION HISTORY ==========
    async def get_user_history(self, user_id: str) -> List[Dict]:
        """
        Get all sessions' message history for a user
        (Useful for "show all my chats" feature)
        """

        try:
            # Scan Redis for all keys matching user_id
            pattern = f"{HISTORY_PREFIX}:{user_id}:*"
            keys = await redis_client.keys(pattern)

            all_messages = []
            for key in keys:
                raw = await redis_client.get(key)
                if raw:
                    messages = json.loads(raw)
                    all_messages.extend(messages)

            logger.info("Retrieved user history", user_id=user_id, total_messages=len(all_messages))
            return all_messages

        except Exception as e:
            logger.error("❌ Failed to get user history", exc_info=e)
            return []

    # ========== CLEAR ALL USER SESSIONS ==========
    async def clear_user_sessions(self, user_id: str) -> None:
        """
        Clear all sessions for a user (user logs out)
        """

        try:
            # Delete all Redis keys for this user
            history_pattern = f"{HISTORY_PREFIX}:{user_id}:*"
            context_pattern = f"{CONTEXT_PREFIX}:{user_id}:*"

            history_keys = await redis_client.keys(history_pattern)
            context_keys = await redis_client.keys(context_pattern)

            all_keys = history_keys + context_keys

            if all_keys:
                await redis_client.delete(*all_keys)

            logger.info("All user sessions cleared", user_id=user_id, sessions=len(history_keys))

        except Exception as e:
            logger.error("❌ Failed to clear user sessions", exc_info=e)

    # ========== GET REDIS STATS ==========
    async def get_stats(self) -> Dict:
        """Get Redis memory stats"""

        try:
            # Count all Aida keys
            history_keys = await redis_client.keys(f"{HISTORY_PREFIX}:*")
            context_keys = await redis_client.keys(f"{CONTEXT_PREFIX}:*")

            # Count unique users
            unique_users = set()
            for key in history_keys + context_keys:
                parts = key.split(":")
                if len(parts) >= 3:
                    unique_users.add(parts[1])

            return {
                "total_history_keys": len(history_keys),
                "total_context_keys": len(context_keys),
                "unique_users": len(unique_users),
                "redis_status": "connected",
            }

        except Exception as e:
            logger.error("❌ Failed to get stats", exc_info=e)
            return {"redis_status": "error"}


# ========== SINGLETON INSTANCE ==========
_memory_manager = None

def get_memory_manager() -> RedisMemoryManager:
    """Get or create global Redis memory manager"""
    global _memory_manager
    if _memory_manager is None:
        _memory_manager = RedisMemoryManager()
    return _memory_manager


# ========== HELPER: Get Current Memory ==========
async def get_current_memory(
    user_id: str,
    session_id: str,
) -> RedisConversationMemory:
    """
    Get or create current session memory from Redis

    Use this in your routes/services
    """

    manager = get_memory_manager()
    memory = await manager.get_or_create_session(user_id, session_id)
    return memory


# ========== INTEGRATION EXAMPLE ==========
"""
Example integration with PropertyListingService:

from app.ai.memory.redis_context_memory import get_current_memory

async def handle_user_message(
    user_id: str,
    session_id: str,
    user_message: str,
    user_role: str,
):
    # Get memory from Redis (creates if doesn't exist)
    memory = await get_current_memory(user_id, session_id)

    # Add user message
    await memory.add_message("user", user_message)

    # Get context
    context = await memory.get_context()

    # Service logic
    service = PropertyListingService()

    if context["status"] == "idle":
        result = await service.start_listing(user_id, user_role, context["language"])

        # Update memory
        await memory.update_context({
            "status": "listing",
            "language": context["language"],
            "user_role": user_role,
        })

        await memory.add_message("assistant", result["example"])

    elif context["status"] == "listing":
        result = await service.continue_listing(user_message, context.get("state", {}))

        await memory.update_context({
            "state": result.get("state"),
            "draft": result.get("draft"),
        })

        await memory.add_message("assistant", result.get("message", ""))

    return result
"""


# ========== TEST ==========
if __name__ == "__main__":
    import asyncio

    async def test():
        print("\n" + "="*70)
        print("REDIS CONTEXT MEMORY TEST")
        print("="*70 + "\n")

        manager = get_memory_manager()

        # Create/get session
        print("1. Get or create session")
        memory = await manager.get_or_create_session("user123", "session_abc")
        print(f"   Session: {memory.session_id}\n")

        # Add messages
        print("2. Add messages")
        await memory.add_message("user", "I want to list a property")
        await memory.add_message("assistant", "Great! Here's an example...")
        await memory.add_message("user", "2-bed apartment in Lagos for 50k")
        print(f"   Messages added\n")

        # Update context
        print("3. Update context")
        await memory.update_context({
            "status": "listing",
            "language": "en",
            "user_role": "landlord",
        })
        context = await memory.get_context()
        print(f"   Status: {context['status']}")
        print(f"   Language: {context['language']}\n")

        # Get messages
        print("4. Get messages")
        messages = await memory.get_messages()
        print(f"   Total messages: {len(messages)}\n")

        # Get summary
        print("5. Get summary")
        summary = await memory.get_summary()
        print(f"   {json.dumps(summary, indent=2)}\n")

        # Get Redis stats
        print("6. Get Redis stats")
        stats = await manager.get_stats()
        print(f"   {json.dumps(stats, indent=2)}\n")

    asyncio.run(test())
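
The module's own integration example targets PropertyListingService; a shorter sketch of one chat turn that combines this Redis memory with the intent detector is below. The function name chat_turn and the metadata shape are assumptions, and it presumes the redis_client in app.ai.config connected successfully:

# Sketch: one persisted chat turn routed through the LLM intent layer.
from app.ai.intent_detector import process_user_message
from app.ai.memory.redis_context_memory import get_current_memory

async def chat_turn(user_id: str, session_id: str, text: str) -> dict:
    memory = await get_current_memory(user_id, session_id)
    await memory.add_message("user", text)

    context = await memory.get_context()
    result = await process_user_message(text, conversation_context=context)

    await memory.add_message("assistant", result["response"], metadata={"intent": result["intent"]})
    await memory.update_context({"language": result["language"]})
    return result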
app/ai/modules/draft_preview.py
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/modules/draft_preview.py - Draft Generation & Preview Display
|
| 2 |
+
"""
|
| 3 |
+
Complete draft generation and preview display
|
| 4 |
+
- Build draft object from collected data
|
| 5 |
+
- Generate formatted preview for user to review
|
| 6 |
+
- Handle edits, publish, discard
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
from typing import Dict, List, Optional
|
| 11 |
+
from structlog import get_logger
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
|
| 14 |
+
logger = get_logger(__name__)
|
| 15 |
+
|
| 16 |
+
# ========== AMENITY ICONS ==========
|
| 17 |
+
AMENITY_ICONS = {
|
| 18 |
+
"wifi": "πΆ",
|
| 19 |
+
"parking": "π
ΏοΈ",
|
| 20 |
+
"furnished": "ποΈ",
|
| 21 |
+
"washing machine": "π§Ό",
|
| 22 |
+
"washing": "π§Ό",
|
| 23 |
+
"dryer": "π₯",
|
| 24 |
+
"balcony": "πΌοΈ",
|
| 25 |
+
"pool": "π",
|
| 26 |
+
"gym": "πͺ",
|
| 27 |
+
"garden": "πΏ",
|
| 28 |
+
"air conditioning": "βοΈ",
|
| 29 |
+
"ac": "βοΈ",
|
| 30 |
+
"kitchen": "π³",
|
| 31 |
+
"security": "π",
|
| 32 |
+
"elevator": "π",
|
| 33 |
+
"laundry": "π",
|
| 34 |
+
"heating": "π₯",
|
| 35 |
+
"hot water": "π‘οΈ",
|
| 36 |
+
"tv cable": "πΊ",
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# ========== FORMAT AMENITIES WITH ICONS ==========
|
| 41 |
+
def format_amenities_with_icons(amenities: List[str]) -> str:
|
| 42 |
+
"""
|
| 43 |
+
Format amenities list with emoji icons
|
| 44 |
+
|
| 45 |
+
Input: ["wifi", "parking", "furnished"]
|
| 46 |
+
Output: "πΆ Wifi | π
ΏοΈ Parking | ποΈ Furnished"
|
| 47 |
+
"""
|
| 48 |
+
if not amenities:
|
| 49 |
+
return "None specified"
|
| 50 |
+
|
| 51 |
+
formatted = []
|
| 52 |
+
for amenity in amenities:
|
| 53 |
+
amenity_lower = amenity.lower().strip()
|
| 54 |
+
icon = AMENITY_ICONS.get(amenity_lower, "β")
|
| 55 |
+
formatted.append(f"{icon} {amenity.title()}")
|
| 56 |
+
|
| 57 |
+
return " | ".join(formatted)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# ========== BUILD DRAFT OBJECT ==========
|
| 61 |
+
def build_draft_object(
|
| 62 |
+
user_id: str,
|
| 63 |
+
state: Dict,
|
| 64 |
+
) -> Dict:
|
| 65 |
+
"""
|
| 66 |
+
Build complete draft object from collected state
|
| 67 |
+
|
| 68 |
+
Returns MongoDB-ready document structure
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
draft = {
|
| 72 |
+
"owner_id": user_id,
|
| 73 |
+
"title": state.get("title", "Property Listing"),
|
| 74 |
+
"description": state.get("description", ""),
|
| 75 |
+
"location": state.get("location"),
|
| 76 |
+
"bedrooms": state.get("bedrooms"),
|
| 77 |
+
"bathrooms": state.get("bathrooms"),
|
| 78 |
+
"price": state.get("price"),
|
| 79 |
+
"price_type": state.get("price_type", "monthly"),
|
| 80 |
+
"listing_type": state.get("listing_type", "rent"),
|
| 81 |
+
"currency": state.get("currency", "XOF"),
|
| 82 |
+
"amenities": state.get("amenities", []),
|
| 83 |
+
"requirements": state.get("requirements"),
|
| 84 |
+
"images": state.get("images", []),
|
| 85 |
+
"status": "draft",
|
| 86 |
+
"created_at": datetime.utcnow().isoformat(),
|
| 87 |
+
"updated_at": datetime.utcnow().isoformat(),
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
logger.info("π¦ Built draft object", title=draft["title"], location=draft["location"])
|
| 91 |
+
return draft
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# ========== GENERATE PREVIEW TEXT ==========
|
| 95 |
+
def generate_preview_text(
|
| 96 |
+
draft: Dict,
|
| 97 |
+
language: str = "en",
|
| 98 |
+
) -> str:
|
| 99 |
+
"""
|
| 100 |
+
Generate beautiful formatted preview text for user to review
|
| 101 |
+
|
| 102 |
+
Shows all listing details in a readable format
|
| 103 |
+
"""
|
| 104 |
+
|
| 105 |
+
title = draft.get("title", "Property Listing")
|
| 106 |
+
location = draft.get("location", "Unknown")
|
| 107 |
+
bedrooms = draft.get("bedrooms", "?")
|
| 108 |
+
bathrooms = draft.get("bathrooms", "?")
|
| 109 |
+
price = draft.get("price", "?")
|
| 110 |
+
price_type = draft.get("price_type", "monthly")
|
| 111 |
+
currency = draft.get("currency", "XOF")
|
| 112 |
+
description = draft.get("description", "")
|
| 113 |
+
amenities = draft.get("amenities", [])
|
| 114 |
+
requirements = draft.get("requirements")
|
| 115 |
+
images = draft.get("images", [])
|
| 116 |
+
listing_type = draft.get("listing_type", "rent").upper()
|
| 117 |
+
|
| 118 |
+
# Format amenities with icons
|
| 119 |
+
amenities_text = format_amenities_with_icons(amenities)
|
| 120 |
+
|
| 121 |
+
# Build images section
|
| 122 |
+
images_count = len(images) if images else 0
|
| 123 |
+
images_section = f"\nπΌοΈ Images: {images_count} uploaded" if images_count > 0 else "\nπΌοΈ No images uploaded yet"
|
| 124 |
+
|
| 125 |
+
# Build requirements section
|
| 126 |
+
requirements_section = f"\nπ Requirements: {requirements}" if requirements else ""
|
| 127 |
+
|
| 128 |
+
# Build preview
|
| 129 |
+
preview = f"""
|
| 130 |
+
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 131 |
+
π LISTING PREVIEW
|
| 132 |
+
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 133 |
+
|
| 134 |
+
**{title}**
|
| 135 |
+
|
| 136 |
+
π Location: {location}
|
| 137 |
+
π·οΈ Type: {listing_type}
|
| 138 |
+
ποΈ Bedrooms: {bedrooms}
|
| 139 |
+
πΏ Bathrooms: {bathrooms}
|
| 140 |
+
π° Price: {price:,} {currency}/{price_type}
|
| 141 |
+
|
| 142 |
+
π Description:
|
| 143 |
+
{description}
|
| 144 |
+
|
| 145 |
+
β¨ Amenities:
|
| 146 |
+
{amenities_text}
|
| 147 |
+
|
| 148 |
+
{requirements_section}
|
| 149 |
+
{images_section}
|
| 150 |
+
|
| 151 |
+
βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 152 |
+
"""
|
| 153 |
+
|
| 154 |
+
logger.info("β¨ Generated preview", title=title, images=images_count)
|
| 155 |
+
return preview
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
# ========== GENERATE PREVIEW WITH OPTIONS ==========
|
| 159 |
+
async def generate_and_show_preview(
|
| 160 |
+
user_id: str,
|
| 161 |
+
state: Dict,
|
| 162 |
+
language: str = "en",
|
| 163 |
+
) -> Dict:
|
| 164 |
+
"""
|
| 165 |
+
Generate draft and preview for user review
|
| 166 |
+
|
| 167 |
+
Returns:
|
| 168 |
+
- draft: Complete draft object
|
| 169 |
+
- preview_text: Formatted preview to show user
|
| 170 |
+
- options: Next actions (publish, edit, discard, add_images)
|
| 171 |
+
"""
|
| 172 |
+
|
| 173 |
+
# Build draft object
|
| 174 |
+
draft = build_draft_object(user_id, state)
|
| 175 |
+
|
| 176 |
+
# Generate preview text
|
| 177 |
+
preview_text = generate_preview_text(draft, language)
|
| 178 |
+
|
| 179 |
+
# Determine next options based on images
|
| 180 |
+
images_count = len(draft.get("images", []))
|
| 181 |
+
|
| 182 |
+
if images_count == 0:
|
| 183 |
+
next_message = {
|
| 184 |
+
"en": "\nπΈ You can now:\nβ’ Add images to make your listing more attractive\nβ’ **Publish** to make it live\nβ’ **Edit** any details\nβ’ **Discard** to start over",
|
| 185 |
+
"fr": "\nπΈ Vous pouvez maintenant:\nβ’ Ajouter des images pour rendre votre annonce plus attrayante\nβ’ **Publier** pour la mettre en ligne\nβ’ **Γditer** les dΓ©tails\nβ’ **Annuler** pour recommencer",
|
| 186 |
+
}
|
| 187 |
+
else:
|
| 188 |
+
next_message = {
|
| 189 |
+
"en": f"\n✅ Great! You have {images_count} image(s). You can now:\n• Add more images\n• **Publish** to make it live\n• **Edit** any details\n• **Discard** to start over",
|
| 190 |
+
"fr": f"\n✅ Excellent! Vous avez {images_count} image(s). Vous pouvez maintenant:\n• Ajouter plus d'images\n• **Publier** pour la mettre en ligne\n• **Éditer** les détails\n• **Annuler** pour recommencer",
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
full_preview = preview_text + next_message.get(language, next_message["en"])
|
| 194 |
+
|
| 195 |
+
return {
|
| 196 |
+
"action": "show_preview",
|
| 197 |
+
"draft": draft,
|
| 198 |
+
"preview_text": full_preview,
|
| 199 |
+
"options": ["publish", "edit", "discard", "add_image"],
|
| 200 |
+
"state": {
|
| 201 |
+
"user_id": user_id,
|
| 202 |
+
"status": "preview_shown",
|
| 203 |
+
"draft": draft,
|
| 204 |
+
"language": language,
|
| 205 |
+
}
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
# ========== EDIT DRAFT FIELD ==========
|
| 210 |
+
async def handle_edit_field(
|
| 211 |
+
user_input: str,
|
| 212 |
+
draft: Dict,
|
| 213 |
+
language: str = "en",
|
| 214 |
+
) -> Dict:
|
| 215 |
+
"""
|
| 216 |
+
Handle when user wants to edit a field
|
| 217 |
+
|
| 218 |
+
Extract which field to edit and return it
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
# Fields user can edit
|
| 222 |
+
editable_fields = [
|
| 223 |
+
"location", "bedrooms", "bathrooms", "price",
|
| 224 |
+
"amenities", "requirements", "title", "description"
|
| 225 |
+
]
|
| 226 |
+
|
| 227 |
+
# Find which field user wants to edit
|
| 228 |
+
detected_field = None
|
| 229 |
+
user_input_lower = user_input.lower()
|
| 230 |
+
|
| 231 |
+
for field in editable_fields:
|
| 232 |
+
if field in user_input_lower:
|
| 233 |
+
detected_field = field
|
| 234 |
+
break
|
| 235 |
+
|
| 236 |
+
if not detected_field:
|
| 237 |
+
# Can't determine field
|
| 238 |
+
logger.warning("β οΈ Could not detect which field to edit")
|
| 239 |
+
return {
|
| 240 |
+
"action": "ask_which_field",
|
| 241 |
+
"message": {
|
| 242 |
+
"en": "Which field would you like to edit? (location, bedrooms, bathrooms, price, amenities, requirements)",
|
| 243 |
+
"fr": "Quel champ voulez-vous modifier? (location, bedrooms, bathrooms, price, amenities, requirements)",
|
| 244 |
+
}.get(language),
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
logger.info(f"βοΈ User wants to edit {detected_field}")
|
| 248 |
+
|
| 249 |
+
return {
|
| 250 |
+
"action": "editing_field",
|
| 251 |
+
"field": detected_field,
|
| 252 |
+
"current_value": draft.get(detected_field),
|
| 253 |
+
"message": {
|
| 254 |
+
"en": f"What's the new value for {detected_field}?",
|
| 255 |
+
"fr": f"Quelle est la nouvelle valeur pour {detected_field}?",
|
| 256 |
+
}.get(language),
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
# ========== UPDATE DRAFT WITH EDIT ==========
|
| 261 |
+
async def update_draft_field(
|
| 262 |
+
draft: Dict,
|
| 263 |
+
field: str,
|
| 264 |
+
new_value: str,
|
| 265 |
+
) -> Dict:
|
| 266 |
+
"""
|
| 267 |
+
Update a specific field in draft and return updated draft
|
| 268 |
+
"""
|
| 269 |
+
|
| 270 |
+
old_value = draft.get(field)
|
| 271 |
+
|
| 272 |
+
# Parse value based on field type
|
| 273 |
+
if field in ["bedrooms", "bathrooms"]:
|
| 274 |
+
new_value = int(re.search(r'\d+', new_value).group()) if re.search(r'\d+', new_value) else old_value
|
| 275 |
+
elif field == "price":
|
| 276 |
+
new_value = float(re.search(r'\d+', new_value).group()) if re.search(r'\d+', new_value) else old_value
|
| 277 |
+
elif field == "amenities":
|
| 278 |
+
new_value = [a.strip() for a in new_value.split(",")]
|
| 279 |
+
|
| 280 |
+
# Update draft
|
| 281 |
+
draft[field] = new_value
|
| 282 |
+
draft["updated_at"] = datetime.utcnow().isoformat()
|
| 283 |
+
|
| 284 |
+
logger.info(f"✅ Updated {field}", old=old_value, new=new_value)
|
| 285 |
+
|
| 286 |
+
return draft
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# ========== PUBLISH DRAFT ==========
|
| 290 |
+
async def publish_draft(
|
| 291 |
+
draft: Dict,
|
| 292 |
+
language: str = "en",
|
| 293 |
+
) -> Dict:
|
| 294 |
+
"""
|
| 295 |
+
Publish draft to MongoDB
|
| 296 |
+
|
| 297 |
+
Changes status from "draft" to "published"
|
| 298 |
+
"""
|
| 299 |
+
|
| 300 |
+
# Update draft status
|
| 301 |
+
draft["status"] = "published"
|
| 302 |
+
draft["published_at"] = datetime.utcnow().isoformat()
|
| 303 |
+
|
| 304 |
+
logger.info("✅ Draft published", title=draft.get("title"))
|
| 305 |
+
|
| 306 |
+
# In real scenario, this would save to MongoDB
|
| 307 |
+
# For now, just mark as published
|
| 308 |
+
|
| 309 |
+
return {
|
| 310 |
+
"action": "published",
|
| 311 |
+
"draft": draft,
|
| 312 |
+
"message": {
|
| 313 |
+
"en": f"✅ Your listing '{draft.get('title')}' is now live! Renters can see it.",
|
| 314 |
+
"fr": f"✅ Votre annonce '{draft.get('title')}' est maintenant en ligne! Les locataires peuvent la voir.",
|
| 315 |
+
}.get(language),
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
# ========== DISCARD DRAFT ==========
|
| 320 |
+
def discard_draft(language: str = "en") -> Dict:
|
| 321 |
+
"""
|
| 322 |
+
Discard draft - user starts over
|
| 323 |
+
"""
|
| 324 |
+
|
| 325 |
+
logger.info("β Draft discarded")
|
| 326 |
+
|
| 327 |
+
return {
|
| 328 |
+
"action": "discarded",
|
| 329 |
+
"message": {
|
| 330 |
+
"en": "β Your draft has been cleared. What would you like to do next?",
|
| 331 |
+
"fr": "β Votre brouillon a Γ©tΓ© effacΓ©. Que voulez-vous faire ensuite?",
|
| 332 |
+
}.get(language),
|
| 333 |
+
"state": {
|
| 334 |
+
"status": "idle",
|
| 335 |
+
"draft": None,
|
| 336 |
+
}
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# ========== COMPLETE FLOW EXAMPLE ==========
|
| 341 |
+
async def demo_complete_flow():
|
| 342 |
+
"""
|
| 343 |
+
Demonstration of complete draft -> preview -> publish flow
|
| 344 |
+
"""
|
| 345 |
+
|
| 346 |
+
print("\n" + "="*70)
|
| 347 |
+
print("π COMPLETE DRAFT & PREVIEW DEMO")
|
| 348 |
+
print("="*70 + "\n")
|
| 349 |
+
|
| 350 |
+
# Sample collected state
|
| 351 |
+
state = {
|
| 352 |
+
"title": "Beautiful 2-Bedroom Apartment in Lagos",
|
| 353 |
+
"description": "Spacious 2-bedroom, 1-bathroom apartment in the heart of Lagos. Fully furnished with modern amenities. Perfect for families or professionals.",
|
| 354 |
+
"location": "Lagos",
|
| 355 |
+
"bedrooms": 2,
|
| 356 |
+
"bathrooms": 1,
|
| 357 |
+
"price": 50000,
|
| 358 |
+
"price_type": "monthly",
|
| 359 |
+
"listing_type": "rent",
|
| 360 |
+
"currency": "NGN",
|
| 361 |
+
"amenities": ["wifi", "parking", "furnished"],
|
| 362 |
+
"requirements": "1-month security deposit required",
|
| 363 |
+
"images": ["https://cloudflare.com/img1.jpg", "https://cloudflare.com/img2.jpg"],
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
# Generate preview
|
| 367 |
+
result = await generate_and_show_preview("user123", state, language="en")
|
| 368 |
+
|
| 369 |
+
print(result["preview_text"])
|
| 370 |
+
print(f"\nπ Available Actions: {result['options']}\n")
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
if __name__ == "__main__":
|
| 374 |
+
import asyncio
|
| 375 |
+
import re
|
| 376 |
+
|
| 377 |
+
asyncio.run(demo_complete_flow())
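Taken together, `generate_and_show_preview`, `publish_draft` and `discard_draft` form a small draft-review state machine. The sketch below is hypothetical glue code, not part of this commit, showing how a chat handler might consume them; the names `on_fields_complete` / `on_preview_reply` and the naive keyword routing are illustration-only assumptions (the commit itself routes these replies through the LLM-based detector in `post_draft_handler.py`).

```python
# Hypothetical caller, assuming the draft_preview helpers above are importable.
from app.ai.modules.draft_preview import (
    generate_and_show_preview,
    publish_draft,
    discard_draft,
)

async def on_fields_complete(user_id: str, state: dict, language: str = "en") -> dict:
    # Build the draft and the formatted preview in one step; the returned
    # "state" dict is what the caller would persist between turns.
    return await generate_and_show_preview(user_id, state, language)

async def on_preview_reply(reply: str, saved_state: dict, language: str = "en") -> dict:
    # Naive keyword routing for illustration only.
    draft = saved_state["draft"]
    text = reply.lower()
    if "publish" in text:
        return await publish_draft(draft, language)
    if "discard" in text or "cancel" in text:
        return discard_draft(language)
    return {"action": "unclear", "draft": draft}
```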
|
app/ai/modules/image_upload.py
ADDED
|
@@ -0,0 +1,377 @@
| 1 |
+
# app/ai/modules/image_upload.py - Image Upload & Cloudflare Integration
|
| 2 |
+
"""
|
| 3 |
+
Image upload to Cloudflare:
|
| 4 |
+
1. User provides image (base64 or file)
|
| 5 |
+
2. Upload to Cloudflare
|
| 6 |
+
3. Get public URL back
|
| 7 |
+
4. Save URL to listing state
|
| 8 |
+
5. Track number of images
|
| 9 |
+
6. Once at least 1 image uploaded, allow draft generation
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import base64
|
| 13 |
+
import mimetypes
|
| 14 |
+
import httpx
|
| 15 |
+
from typing import Dict, Optional, Tuple
|
| 16 |
+
from structlog import get_logger
|
| 17 |
+
from app.config import settings
|
| 18 |
+
|
| 19 |
+
logger = get_logger(__name__)
|
| 20 |
+
|
| 21 |
+
CF_UPLOAD_URL = f"https://api.cloudflare.com/client/v4/accounts/{settings.CF_ACCOUNT_ID}/images/v1"
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# ========== UPLOAD IMAGE TO CLOUDFLARE ==========
|
| 25 |
+
async def upload_image_to_cloudflare(
|
| 26 |
+
image_data: str, # Base64 encoded image data
|
| 27 |
+
mime_type: str, # e.g., "image/jpeg", "image/png"
|
| 28 |
+
filename: str = "property_image",
|
| 29 |
+
) -> Tuple[bool, Optional[str], Optional[str]]:
|
| 30 |
+
"""
|
| 31 |
+
Upload image to Cloudflare
|
| 32 |
+
|
| 33 |
+
Returns:
|
| 34 |
+
- success: bool - Whether upload succeeded
|
| 35 |
+
- public_url: str - Public Cloudflare URL if successful
|
| 36 |
+
- error: str - Error message if failed
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
try:
|
| 40 |
+
# Decode base64 to binary
|
| 41 |
+
binary_data = base64.b64decode(image_data)
|
| 42 |
+
|
| 43 |
+
# Get file extension from mime type
|
| 44 |
+
ext = mimetypes.guess_extension(mime_type) or ".jpg"
|
| 45 |
+
full_filename = f"{filename}{ext}"
|
| 46 |
+
|
| 47 |
+
logger.info("π€ Uploading to Cloudflare", filename=full_filename, size_kb=len(binary_data) / 1024)
|
| 48 |
+
|
| 49 |
+
# Prepare headers
|
| 50 |
+
headers = {
|
| 51 |
+
"Authorization": f"Bearer {settings.CF_API_TOKEN}",
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
# Prepare files
|
| 55 |
+
files = {
|
| 56 |
+
"file": (full_filename, binary_data, mime_type),
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
# Upload to Cloudflare
|
| 60 |
+
async with httpx.AsyncClient(timeout=60) as client:
|
| 61 |
+
response = await client.post(
|
| 62 |
+
CF_UPLOAD_URL,
|
| 63 |
+
files=files,
|
| 64 |
+
headers=headers,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
response.raise_for_status()
|
| 68 |
+
data = response.json()
|
| 69 |
+
|
| 70 |
+
# Check if upload was successful
|
| 71 |
+
if not data.get("success"):
|
| 72 |
+
error_msg = str(data.get("errors", "Unknown error"))
|
| 73 |
+
logger.error("β Cloudflare upload failed", errors=error_msg)
|
| 74 |
+
return False, None, f"Upload failed: {error_msg}"
|
| 75 |
+
|
| 76 |
+
# Extract image ID and build public URL
|
| 77 |
+
image_id = data["result"]["id"]
|
| 78 |
+
public_url = f"https://imagedelivery.net/{image_id}/public"
|
| 79 |
+
|
| 80 |
+
logger.info("✅ Image uploaded to Cloudflare", url=public_url)
|
| 81 |
+
return True, public_url, None
|
| 82 |
+
|
| 83 |
+
except base64.binascii.Error as e:
|
| 84 |
+
logger.error("β Invalid base64 data", exc_info=e)
|
| 85 |
+
return False, None, "Invalid image data format"
|
| 86 |
+
|
| 87 |
+
except httpx.RequestError as e:
|
| 88 |
+
logger.error("β Network error uploading to Cloudflare", exc_info=e)
|
| 89 |
+
return False, None, f"Network error: {str(e)}"
|
| 90 |
+
|
| 91 |
+
except Exception as e:
|
| 92 |
+
logger.error("β Unexpected error uploading image", exc_info=e)
|
| 93 |
+
return False, None, f"Upload error: {str(e)}"
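A caveat on the URL built above: Cloudflare Images delivery URLs normally take the form `https://imagedelivery.net/<account_hash>/<image_id>/<variant>`, and the upload response already includes ready-made delivery URLs under `result["variants"]`. If the hand-built `public_url` ever fails to resolve, preferring the returned variants is safer. A minimal sketch, assuming the standard `/images/v1` response shape and not part of this commit:

```python
# Sketch only: take the delivery URL Cloudflare returns instead of
# rebuilding it from the image ID alone.
result = data["result"]
variants = result.get("variants") or []
# Fall back to the commit's current behaviour if no variant URLs are present.
public_url = variants[0] if variants else f"https://imagedelivery.net/{result['id']}/public"
```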
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# ========== PROCESS IMAGE UPLOAD FROM USER ==========
|
| 97 |
+
async def process_image_upload(
|
| 98 |
+
image_base64: str,
|
| 99 |
+
mime_type: str,
|
| 100 |
+
state: Dict,
|
| 101 |
+
language: str = "en",
|
| 102 |
+
) -> Dict:
|
| 103 |
+
"""
|
| 104 |
+
Complete image upload flow:
|
| 105 |
+
1. Upload to Cloudflare
|
| 106 |
+
2. Get URL back
|
| 107 |
+
3. Add to state
|
| 108 |
+
4. Return response
|
| 109 |
+
|
| 110 |
+
Returns:
|
| 111 |
+
{
|
| 112 |
+
"success": bool,
|
| 113 |
+
"message": str,
|
| 114 |
+
"image_url": str (if successful),
|
| 115 |
+
"total_images": int,
|
| 116 |
+
"state": updated state dict,
|
| 117 |
+
"action": "upload_another" | "ready_for_draft"
|
| 118 |
+
}
|
| 119 |
+
"""
|
| 120 |
+
|
| 121 |
+
# Upload to Cloudflare
|
| 122 |
+
success, url, error = await upload_image_to_cloudflare(image_base64, mime_type)
|
| 123 |
+
|
| 124 |
+
if not success:
|
| 125 |
+
logger.warning(f"β οΈ Image upload failed: {error}")
|
| 126 |
+
|
| 127 |
+
error_messages = {
|
| 128 |
+
"en": f"β Failed to upload image: {error}\n\nPlease try again or try a different image.",
|
| 129 |
+
"fr": f"β Impossible de tΓ©lΓ©charger l'image: {error}\n\nVeuillez rΓ©essayer ou essayer une image diffΓ©rente.",
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
return {
|
| 133 |
+
"success": False,
|
| 134 |
+
"message": error_messages.get(language, error_messages["en"]),
|
| 135 |
+
"action": "ask_retry",
|
| 136 |
+
"state": state,
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
# Add URL to state
|
| 140 |
+
if "images" not in state:
|
| 141 |
+
state["images"] = []
|
| 142 |
+
|
| 143 |
+
state["images"].append(url)
|
| 144 |
+
images_count = len(state["images"])
|
| 145 |
+
|
| 146 |
+
logger.info(f"✅ Image added to listing", total_images=images_count)
|
| 147 |
+
|
| 148 |
+
# Build response
|
| 149 |
+
success_messages = {
|
| 150 |
+
"en": f"✅ Image uploaded successfully!\n\nYou now have {images_count} image(s). You can:\n• Add more images\n• **Generate preview** to see your listing\n• Or continue with the next step",
|
| 151 |
+
"fr": f"✅ Image téléchargée avec succès!\n\nVous avez maintenant {images_count} image(s). Vous pouvez:\n• Ajouter plus d'images\n• **Générer un aperçu** pour voir votre annonce\n• Ou continuer avec l'étape suivante",
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
return {
|
| 155 |
+
"success": True,
|
| 156 |
+
"message": success_messages.get(language, success_messages["en"]),
|
| 157 |
+
"image_url": url,
|
| 158 |
+
"total_images": images_count,
|
| 159 |
+
"action": "upload_another_or_continue",
|
| 160 |
+
"state": state,
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# ========== CHECK IF IMAGES ARE PRESENT ==========
|
| 165 |
+
def has_images(state: Dict) -> bool:
|
| 166 |
+
"""Check if listing has at least one image"""
|
| 167 |
+
images = state.get("images", [])
|
| 168 |
+
return len(images) > 0
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# ========== GET IMAGES STATUS ==========
|
| 172 |
+
def get_images_status(state: Dict, language: str = "en") -> Dict:
|
| 173 |
+
"""
|
| 174 |
+
Check images status and return appropriate message
|
| 175 |
+
|
| 176 |
+
If 0 images: "You need to upload at least 1 image"
|
| 177 |
+
If 1+ images: "You have X images, ready to generate draft"
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
images = state.get("images", [])
|
| 181 |
+
count = len(images)
|
| 182 |
+
|
| 183 |
+
if count == 0:
|
| 184 |
+
status_messages = {
|
| 185 |
+
"en": "πΈ You need to upload at least 1 image to continue.\n\nImages make your listing more attractive and help renters make decisions.",
|
| 186 |
+
"fr": "πΈ Vous devez tΓ©lΓ©charger au moins 1 image pour continuer.\n\nLes images rendent votre annonce plus attrayante et aident les locataires Γ dΓ©cider.",
|
| 187 |
+
}
|
| 188 |
+
return {
|
| 189 |
+
"has_images": False,
|
| 190 |
+
"count": 0,
|
| 191 |
+
"message": status_messages.get(language, status_messages["en"]),
|
| 192 |
+
"can_generate_draft": False,
|
| 193 |
+
}
|
| 194 |
+
else:
|
| 195 |
+
ready_messages = {
|
| 196 |
+
"en": f"✅ You have {count} image(s)! Ready to generate your listing preview.",
|
| 197 |
+
"fr": f"✅ Vous avez {count} image(s)! Prêt à générer un aperçu de votre annonce.",
|
| 198 |
+
}
|
| 199 |
+
return {
|
| 200 |
+
"has_images": True,
|
| 201 |
+
"count": count,
|
| 202 |
+
"message": ready_messages.get(language, ready_messages["en"]),
|
| 203 |
+
"can_generate_draft": True,
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# ========== ASK FOR IMAGE UPLOAD ==========
|
| 208 |
+
def ask_for_image_upload(language: str = "en") -> str:
|
| 209 |
+
"""
|
| 210 |
+
Ask user to upload an image
|
| 211 |
+
"""
|
| 212 |
+
|
| 213 |
+
messages = {
|
| 214 |
+
"en": "πΈ Great! Now please upload at least one image of the property. This makes your listing much more attractive!\n\nYou can upload:\nβ’ Photo of the property\nβ’ Photo of the living room\nβ’ Photo of bedrooms\nβ’ Any interior/exterior photo",
|
| 215 |
+
"fr": "πΈ Excellent! Veuillez maintenant tΓ©lΓ©charger au moins une image de la propriΓ©tΓ©. Cela rend votre annonce beaucoup plus attrayante!\n\nVous pouvez tΓ©lΓ©charger:\nβ’ Photo de la propriΓ©tΓ©\nβ’ Photo du salon\nβ’ Photo des chambres\nβ’ N'importe quelle photo intΓ©rieure/extΓ©rieure",
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
return messages.get(language, messages["en"])
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# ========== COMPLETE FLOW: Collect Required Fields -> Ask for Images -> Generate Draft ==========
|
| 222 |
+
async def handle_property_collection_and_images(
|
| 223 |
+
user_input: str,
|
| 224 |
+
current_state: Dict,
|
| 225 |
+
user_role: str,
|
| 226 |
+
language: str,
|
| 227 |
+
) -> Dict:
|
| 228 |
+
"""
|
| 229 |
+
Handle the complete collection flow including images
|
| 230 |
+
|
| 231 |
+
Flow:
|
| 232 |
+
1. Collect all required fields (location, bedrooms, bathrooms, price)
|
| 233 |
+
2. If all required fields complete:
|
| 234 |
+
- Check if images uploaded
|
| 235 |
+
- If no images: Ask user to upload
|
| 236 |
+
- If images present: Can generate draft
|
| 237 |
+
|
| 238 |
+
This function coordinates between field collection and image requirement
|
| 239 |
+
"""
|
| 240 |
+
|
| 241 |
+
from app.ai.modules.property_listing import build_listing_state, find_missing_fields
|
| 242 |
+
|
| 243 |
+
# First: Check if this is an image upload or field input
|
| 244 |
+
is_image_upload = "image" in current_state or current_state.get("uploading_image")
|
| 245 |
+
|
| 246 |
+
if not is_image_upload:
|
| 247 |
+
# Normal field collection flow
|
| 248 |
+
updated_state = await build_listing_state(
|
| 249 |
+
user_id=current_state.get("user_id"),
|
| 250 |
+
user_role=user_role,
|
| 251 |
+
language=language,
|
| 252 |
+
user_input=user_input,
|
| 253 |
+
current_state=current_state.get("raw_state", {}),
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
missing_fields = updated_state["missing_fields"]
|
| 257 |
+
|
| 258 |
+
if missing_fields:
|
| 259 |
+
# Still have missing required fields
|
| 260 |
+
from app.ai.modules.property_listing import generate_smart_question
|
| 261 |
+
|
| 262 |
+
next_field = missing_fields[0]
|
| 263 |
+
question = await generate_smart_question(next_field, language)
|
| 264 |
+
|
| 265 |
+
return {
|
| 266 |
+
"action": "ask_next_field",
|
| 267 |
+
"question": question,
|
| 268 |
+
"state": updated_state,
|
| 269 |
+
}
|
| 270 |
+
else:
|
| 271 |
+
# All required fields collected - NOW ask for images
|
| 272 |
+
logger.info("✅ All required fields collected, asking for images")
|
| 273 |
+
|
| 274 |
+
return {
|
| 275 |
+
"action": "ask_for_images",
|
| 276 |
+
"message": ask_for_image_upload(language),
|
| 277 |
+
"state": {
|
| 278 |
+
**updated_state,
|
| 279 |
+
"status": "waiting_for_images",
|
| 280 |
+
},
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
else:
|
| 284 |
+
# This is an image upload - process it
|
| 285 |
+
return {
|
| 286 |
+
"action": "uploading_image",
|
| 287 |
+
"state": current_state,
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# ========== COMPLETE LISTING WORKFLOW ==========
|
| 292 |
+
"""
|
| 293 |
+
WORKFLOW:
|
| 294 |
+
|
| 295 |
+
1. User: "I want to list my property"
|
| 296 |
+
β start_listing()
|
| 297 |
+
|
| 298 |
+
2. Aida: Shows example
|
| 299 |
+
|
| 300 |
+
3. User: "I have a 2-bed apartment in Lagos for 50k monthly with wifi"
|
| 301 |
+
β Field extraction + auto-detection
|
| 302 |
+
|
| 303 |
+
4. If missing fields: Ask for next field
|
| 304 |
+
If all fields complete: Go to step 5
|
| 305 |
+
|
| 306 |
+
5. Aida: "Great! Now please upload at least one image"
|
| 307 |
+
β ask_for_image_upload()
|
| 308 |
+
|
| 309 |
+
6. User: Uploads image (base64 encoded)
|
| 310 |
+
β process_image_upload() β upload to Cloudflare β get URL back
|
| 311 |
+
|
| 312 |
+
7. Aida: "Image uploaded! You can add more or continue"
|
| 313 |
+
|
| 314 |
+
8. User: "OK, generate preview" or "Continue"
|
| 315 |
+
β get_images_status() β check if images present β OK to generate draft
|
| 316 |
+
|
| 317 |
+
9. ✅ Generate draft with images
|
| 318 |
+
β from draft_preview.py: generate_and_show_preview()
|
| 319 |
+
|
| 320 |
+
10. Draft preview shown with:
|
| 321 |
+
- Title
|
| 322 |
+
- Description
|
| 323 |
+
- All details
|
| 324 |
+
- Images (thumbnails or count)
|
| 325 |
+
|
| 326 |
+
11. User: "Publish" / "Edit" / "Discard" / "Add more images"
|
| 327 |
+
"""
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
# ========== TEST ==========
|
| 331 |
+
if __name__ == "__main__":
|
| 332 |
+
import asyncio
|
| 333 |
+
|
| 334 |
+
async def test():
|
| 335 |
+
print("\n" + "="*70)
|
| 336 |
+
print("πΈ IMAGE UPLOAD TEST")
|
| 337 |
+
print("="*70 + "\n")
|
| 338 |
+
|
| 339 |
+
# Simulate a state after field collection
|
| 340 |
+
state = {
|
| 341 |
+
"user_id": "user123",
|
| 342 |
+
"location": "Lagos",
|
| 343 |
+
"bedrooms": 2,
|
| 344 |
+
"bathrooms": 1,
|
| 345 |
+
"price": 50000,
|
| 346 |
+
"price_type": "monthly",
|
| 347 |
+
"listing_type": "rent",
|
| 348 |
+
"currency": "NGN",
|
| 349 |
+
"amenities": ["wifi", "parking"],
|
| 350 |
+
"images": [],
|
| 351 |
+
}
|
| 352 |
+
|
| 353 |
+
# Check images status (should be 0)
|
| 354 |
+
status = get_images_status(state, "en")
|
| 355 |
+
print("1οΈβ£ Before upload:")
|
| 356 |
+
print(f" Message: {status['message']}\n")
|
| 357 |
+
|
| 358 |
+
# Ask for image upload
|
| 359 |
+
ask_msg = ask_for_image_upload("en")
|
| 360 |
+
print("2οΈβ£ Ask for image:")
|
| 361 |
+
print(f" {ask_msg}\n")
|
| 362 |
+
|
| 363 |
+
# Simulate image upload (this would be real base64 in production)
|
| 364 |
+
print("3οΈβ£ User uploads image...")
|
| 365 |
+
print(" (In real scenario, image would be base64 encoded)\n")
|
| 366 |
+
|
| 367 |
+
# After upload, check status again
|
| 368 |
+
state["images"] = ["https://imagedelivery.net/abc123/public"]
|
| 369 |
+
status = get_images_status(state, "en")
|
| 370 |
+
print("4οΈβ£ After upload:")
|
| 371 |
+
print(f" Has images: {status['has_images']}")
|
| 372 |
+
print(f" Count: {status['count']}")
|
| 373 |
+
print(f" Can generate draft: {status['can_generate_draft']}")
|
| 374 |
+
print(f" Message: {status['message']}\n")
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
asyncio.run(test())
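The WORKFLOW comment above (steps 5-8) can be wired together with the helpers in this module. The following is a hypothetical caller, not part of this commit; `handle_image_turn` and the hard-coded `"image/jpeg"` MIME type are assumptions for illustration only.

```python
from app.ai.modules.image_upload import (
    ask_for_image_upload,
    process_image_upload,
    get_images_status,
)

async def handle_image_turn(image_b64, state: dict, language: str = "en") -> dict:
    if not image_b64:
        # Step 5: no image attached yet - prompt the user to upload one
        return {"message": ask_for_image_upload(language), "state": state}

    # Step 6: upload to Cloudflare and append the public URL to state["images"]
    result = await process_image_upload(image_b64, "image/jpeg", state, language)
    if not result["success"]:
        return result  # ask the user to retry with another image

    # Steps 7-8: report progress and whether a draft/preview can now be generated
    status = get_images_status(result["state"], language)
    result["can_generate_draft"] = status["can_generate_draft"]
    return result
```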
|
app/ai/modules/post_draft_handler.py
ADDED
|
@@ -0,0 +1,498 @@
| 1 |
+
# app/ai/modules/post_draft_handler.py - Handle Publish/Edit/Discard
|
| 2 |
+
"""
|
| 3 |
+
Complete post-draft action handling:
|
| 4 |
+
1. Detect user's intent (publish, edit, discard)
|
| 5 |
+
2. Route to correct handler
|
| 6 |
+
3. For edits: Update field -> Regenerate preview
|
| 7 |
+
4. For publish: Save to MongoDB
|
| 8 |
+
5. For discard: Clear and reset
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import json
|
| 12 |
+
from datetime import datetime
|
| 13 |
+
from typing import Dict, Optional
|
| 14 |
+
from openai import AsyncOpenAI
|
| 15 |
+
from structlog import get_logger
|
| 16 |
+
from app.config import settings
|
| 17 |
+
from app.database import get_db
|
| 18 |
+
from bson import ObjectId
|
| 19 |
+
|
| 20 |
+
logger = get_logger(__name__)
|
| 21 |
+
|
| 22 |
+
client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url=settings.DEEPSEEK_BASE_URL)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# ========== 1. DETECT POST-DRAFT ACTION ==========
|
| 26 |
+
async def detect_post_draft_action(
|
| 27 |
+
user_input: str,
|
| 28 |
+
language: str = "en",
|
| 29 |
+
) -> str:
|
| 30 |
+
"""
|
| 31 |
+
Use LLM to detect what user wants to do with their draft
|
| 32 |
+
|
| 33 |
+
Returns: "publish" | "edit" | "discard" | "add_image" | "unknown"
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
prompt = f"""User just saw their property listing preview. They said:
|
| 37 |
+
|
| 38 |
+
"{user_input}"
|
| 39 |
+
|
| 40 |
+
What do they want to do? Choose ONE:
|
| 41 |
+
- "publish" - They want to publish/confirm/post the listing
|
| 42 |
+
- "edit" - They want to change/edit something
|
| 43 |
+
- "discard" - They want to cancel/discard/delete the draft
|
| 44 |
+
- "add_image" - They want to add more images
|
| 45 |
+
- "unknown" - Can't tell what they want
|
| 46 |
+
|
| 47 |
+
Respond with ONLY one word: publish, edit, discard, add_image, or unknown"""
|
| 48 |
+
|
| 49 |
+
try:
|
| 50 |
+
response = await client.chat.completions.create(
|
| 51 |
+
model="deepseek-chat",
|
| 52 |
+
messages=[{"role": "user", "content": prompt}],
|
| 53 |
+
temperature=0.1,
|
| 54 |
+
max_tokens=50,
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
action = response.choices[0].message.content.strip().lower()
|
| 58 |
+
|
| 59 |
+
# Validate action
|
| 60 |
+
valid_actions = ["publish", "edit", "discard", "add_image", "unknown"]
|
| 61 |
+
if action not in valid_actions:
|
| 62 |
+
action = "unknown"
|
| 63 |
+
|
| 64 |
+
logger.info(f"π― Detected post-draft action", action=action, user_input=user_input[:50])
|
| 65 |
+
return action
|
| 66 |
+
|
| 67 |
+
except Exception as e:
|
| 68 |
+
logger.error("β Failed to detect action", exc_info=e)
|
| 69 |
+
return "unknown"
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# ========== 2. DETECT WHICH FIELD TO EDIT ==========
|
| 73 |
+
async def detect_field_to_edit(
|
| 74 |
+
user_input: str,
|
| 75 |
+
available_fields: list = None,
|
| 76 |
+
) -> Optional[str]:
|
| 77 |
+
"""
|
| 78 |
+
Detect which field user wants to edit
|
| 79 |
+
|
| 80 |
+
Available fields: location, bedrooms, bathrooms, price, amenities, requirements, title, description
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
if available_fields is None:
|
| 84 |
+
available_fields = ["location", "bedrooms", "bathrooms", "price", "amenities", "requirements", "title", "description"]
|
| 85 |
+
|
| 86 |
+
prompt = f"""User wants to edit their listing. They said:
|
| 87 |
+
|
| 88 |
+
"{user_input}"
|
| 89 |
+
|
| 90 |
+
Which field do they want to change? Choose from:
|
| 91 |
+
{', '.join(available_fields)}
|
| 92 |
+
|
| 93 |
+
Or respond "unknown" if you can't tell.
|
| 94 |
+
|
| 95 |
+
Respond with ONLY the field name or "unknown"."""
|
| 96 |
+
|
| 97 |
+
try:
|
| 98 |
+
response = await client.chat.completions.create(
|
| 99 |
+
model="deepseek-chat",
|
| 100 |
+
messages=[{"role": "user", "content": prompt}],
|
| 101 |
+
temperature=0.1,
|
| 102 |
+
max_tokens=50,
|
| 103 |
+
)
|
| 104 |
+
|
| 105 |
+
field = response.choices[0].message.content.strip().lower()
|
| 106 |
+
|
| 107 |
+
if field not in available_fields:
|
| 108 |
+
field = None
|
| 109 |
+
|
| 110 |
+
logger.info(f"βοΈ Detected field to edit", field=field)
|
| 111 |
+
return field
|
| 112 |
+
|
| 113 |
+
except Exception as e:
|
| 114 |
+
logger.error("β Failed to detect field", exc_info=e)
|
| 115 |
+
return None
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# ========== 3. EXTRACT NEW VALUE FOR FIELD ==========
|
| 119 |
+
async def extract_new_field_value(
|
| 120 |
+
user_input: str,
|
| 121 |
+
field: str,
|
| 122 |
+
) -> Optional[str]:
|
| 123 |
+
"""
|
| 124 |
+
Extract the new value user provided for the field they're editing
|
| 125 |
+
"""
|
| 126 |
+
|
| 127 |
+
prompt = f"""User is editing the "{field}" field of their property listing. They said:
|
| 128 |
+
|
| 129 |
+
"{user_input}"
|
| 130 |
+
|
| 131 |
+
Extract ONLY the new value for the {field} field.
|
| 132 |
+
- If it's a number (bedrooms, bathrooms, price), extract just the number
|
| 133 |
+
- If it's text (location, amenities, requirements), extract the text
|
| 134 |
+
- If multiple amenities, extract all separated by commas
|
| 135 |
+
|
| 136 |
+
Respond with ONLY the extracted value, nothing else."""
|
| 137 |
+
|
| 138 |
+
try:
|
| 139 |
+
response = await client.chat.completions.create(
|
| 140 |
+
model="deepseek-chat",
|
| 141 |
+
messages=[{"role": "user", "content": prompt}],
|
| 142 |
+
temperature=0.1,
|
| 143 |
+
max_tokens=100,
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
value = response.choices[0].message.content.strip()
|
| 147 |
+
|
| 148 |
+
logger.info(f"βοΈ Extracted new value", field=field, value=value[:50])
|
| 149 |
+
return value if value else None
|
| 150 |
+
|
| 151 |
+
except Exception as e:
|
| 152 |
+
logger.error("β Failed to extract value", exc_info=e)
|
| 153 |
+
return None
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# ========== 4. UPDATE DRAFT WITH EDIT ==========
|
| 157 |
+
def update_draft_field(
|
| 158 |
+
draft: Dict,
|
| 159 |
+
field: str,
|
| 160 |
+
new_value: str,
|
| 161 |
+
) -> Dict:
|
| 162 |
+
"""
|
| 163 |
+
Update a specific field in the draft
|
| 164 |
+
"""
|
| 165 |
+
|
| 166 |
+
old_value = draft.get(field)
|
| 167 |
+
|
| 168 |
+
try:
|
| 169 |
+
# Parse value based on field type
|
| 170 |
+
if field in ["bedrooms", "bathrooms"]:
|
| 171 |
+
# Extract number from string
|
| 172 |
+
import re
|
| 173 |
+
match = re.search(r'\d+', new_value)
|
| 174 |
+
if match:
|
| 175 |
+
new_value = int(match.group())
|
| 176 |
+
else:
|
| 177 |
+
logger.warning(f"β οΈ Could not parse number from {new_value}")
|
| 178 |
+
return draft
|
| 179 |
+
|
| 180 |
+
elif field == "price":
|
| 181 |
+
import re
|
| 182 |
+
match = re.search(r'\d+', new_value)
|
| 183 |
+
if match:
|
| 184 |
+
new_value = int(match.group())
|
| 185 |
+
else:
|
| 186 |
+
logger.warning(f"β οΈ Could not parse price from {new_value}")
|
| 187 |
+
return draft
|
| 188 |
+
|
| 189 |
+
elif field == "amenities":
|
| 190 |
+
# Split by comma and clean
|
| 191 |
+
new_value = [a.strip().lower() for a in new_value.split(",") if a.strip()]
|
| 192 |
+
|
| 193 |
+
# Update the field
|
| 194 |
+
draft[field] = new_value
|
| 195 |
+
draft["updated_at"] = datetime.utcnow().isoformat()
|
| 196 |
+
|
| 197 |
+
logger.info(f"✅ Updated field", field=field, old=old_value, new=new_value)
|
| 198 |
+
return draft
|
| 199 |
+
|
| 200 |
+
except Exception as e:
|
| 201 |
+
logger.error(f"β Failed to update field", field=field, exc_info=e)
|
| 202 |
+
return draft
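A few illustrative calls (not part of the commit) make the parsing rules above concrete. Note in particular that only the first run of digits is kept, so shorthand like "60k" becomes 60 rather than 60000 and needs to be expanded before reaching this function.

```python
draft = {"price": 50000, "bedrooms": 2}

draft = update_draft_field(draft, "bedrooms", "make it 3 bedrooms")
# draft["bedrooms"] == 3

draft = update_draft_field(draft, "amenities", "wifi, Parking , pool")
# draft["amenities"] == ["wifi", "parking", "pool"]

draft = update_draft_field(draft, "price", "60k")
# draft["price"] == 60  (the "k" suffix is dropped by the digit-only regex)
```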
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
# ========== 5. PUBLISH DRAFT TO MONGODB ==========
|
| 206 |
+
async def publish_draft_to_mongodb(
|
| 207 |
+
draft: Dict,
|
| 208 |
+
user_id: str,
|
| 209 |
+
) -> Dict:
|
| 210 |
+
"""
|
| 211 |
+
Save draft to MongoDB as published listing
|
| 212 |
+
|
| 213 |
+
Returns:
|
| 214 |
+
{
|
| 215 |
+
"success": bool,
|
| 216 |
+
"mongo_id": str (if successful),
|
| 217 |
+
"message": str,
|
| 218 |
+
}
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
try:
|
| 222 |
+
db = await get_db()
|
| 223 |
+
listings_col = db["listings"]
|
| 224 |
+
|
| 225 |
+
# Prepare document for MongoDB
|
| 226 |
+
doc = {
|
| 227 |
+
"owner_id": user_id,
|
| 228 |
+
"type": draft.get("listing_type", "rent"),
|
| 229 |
+
"title": draft.get("title", "Property Listing"),
|
| 230 |
+
"description": draft.get("description", ""),
|
| 231 |
+
"location": draft.get("location"),
|
| 232 |
+
"bedrooms": draft.get("bedrooms"),
|
| 233 |
+
"bathrooms": draft.get("bathrooms"),
|
| 234 |
+
"price": draft.get("price"),
|
| 235 |
+
"price_type": draft.get("price_type", "monthly"),
|
| 236 |
+
"currency": draft.get("currency", "XOF"),
|
| 237 |
+
"amenities": draft.get("amenities", []),
|
| 238 |
+
"requirements": draft.get("requirements"),
|
| 239 |
+
"images": draft.get("images", []),
|
| 240 |
+
"status": "published",
|
| 241 |
+
"created_at": datetime.utcnow(),
|
| 242 |
+
"updated_at": datetime.utcnow(),
|
| 243 |
+
"views": 0,
|
| 244 |
+
"favorites": 0,
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
# Insert into MongoDB
|
| 248 |
+
result = await listings_col.insert_one(doc)
|
| 249 |
+
mongo_id = str(result.inserted_id)
|
| 250 |
+
|
| 251 |
+
logger.info("✅ Listing published to MongoDB", mongo_id=mongo_id, title=doc["title"])
|
| 252 |
+
|
| 253 |
+
return {
|
| 254 |
+
"success": True,
|
| 255 |
+
"mongo_id": mongo_id,
|
| 256 |
+
"message": f"✅ Your listing '{doc['title']}' is now live! Renters can see it.",
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
except Exception as e:
|
| 260 |
+
logger.error("β Failed to publish to MongoDB", exc_info=e)
|
| 261 |
+
return {
|
| 262 |
+
"success": False,
|
| 263 |
+
"mongo_id": None,
|
| 264 |
+
"message": f"β Failed to publish listing: {str(e)}",
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# ========== 6. HANDLE EDIT ACTION ==========
|
| 269 |
+
async def handle_edit_action(
|
| 270 |
+
user_input: str,
|
| 271 |
+
draft: Dict,
|
| 272 |
+
language: str = "en",
|
| 273 |
+
) -> Dict:
|
| 274 |
+
"""
|
| 275 |
+
Handle when user wants to edit a field
|
| 276 |
+
|
| 277 |
+
Returns:
|
| 278 |
+
{
|
| 279 |
+
"action": "ask_which_field" | "ask_new_value" | "show_updated_preview",
|
| 280 |
+
"field": str (if detected),
|
| 281 |
+
"message": str,
|
| 282 |
+
"draft": updated draft (if updated),
|
| 283 |
+
}
|
| 284 |
+
"""
|
| 285 |
+
|
| 286 |
+
# Step 1: Detect which field to edit
|
| 287 |
+
field = await detect_field_to_edit(user_input)
|
| 288 |
+
|
| 289 |
+
if not field:
|
| 290 |
+
# Can't detect field, ask user to specify
|
| 291 |
+
messages = {
|
| 292 |
+
"en": "Which field would you like to change? (location, bedrooms, bathrooms, price, amenities, requirements, title, description)",
|
| 293 |
+
"fr": "Quel champ voulez-vous modifier? (location, bedrooms, bathrooms, price, amenities, requirements, title, description)",
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
return {
|
| 297 |
+
"action": "ask_which_field",
|
| 298 |
+
"message": messages.get(language, messages["en"]),
|
| 299 |
+
"draft": draft,
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
# Step 2: Extract new value from user input
|
| 303 |
+
new_value = await extract_new_field_value(user_input, field)
|
| 304 |
+
|
| 305 |
+
if not new_value:
|
| 306 |
+
# Can't extract value, ask user to provide it
|
| 307 |
+
messages = {
|
| 308 |
+
"en": f"What's the new value for {field}?",
|
| 309 |
+
"fr": f"Quelle est la nouvelle valeur pour {field}?",
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
return {
|
| 313 |
+
"action": "ask_new_value",
|
| 314 |
+
"field": field,
|
| 315 |
+
"message": messages.get(language, messages["en"]),
|
| 316 |
+
"draft": draft,
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
# Step 3: Update draft with new value
|
| 320 |
+
updated_draft = update_draft_field(draft, field, new_value)
|
| 321 |
+
|
| 322 |
+
# Return updated draft (caller will regenerate preview)
|
| 323 |
+
messages = {
|
| 324 |
+
"en": f"✅ Updated {field}! Regenerating preview...",
|
| 325 |
+
"fr": f"✅ Mis à jour {field}! Régénération de l'aperçu...",
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
return {
|
| 329 |
+
"action": "show_updated_preview",
|
| 330 |
+
"field": field,
|
| 331 |
+
"message": messages.get(language, messages["en"]),
|
| 332 |
+
"draft": updated_draft,
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
# ========== 7. HANDLE DISCARD ACTION ==========
|
| 337 |
+
def handle_discard_action(language: str = "en") -> Dict:
|
| 338 |
+
"""
|
| 339 |
+
Handle when user wants to discard the draft
|
| 340 |
+
"""
|
| 341 |
+
|
| 342 |
+
messages = {
|
| 343 |
+
"en": "β Your draft has been cleared. What would you like to do next?\nβ’ List another property\nβ’ Search for properties\nβ’ View my listings\nβ’ Chat with an agent",
|
| 344 |
+
"fr": "β Votre brouillon a Γ©tΓ© effacΓ©. Que voulez-vous faire ensuite?\nβ’ Lister une autre propriΓ©tΓ©\nβ’ Rechercher des propriΓ©tΓ©s\nβ’ Voir mes annonces\nβ’ Discuter avec un agent",
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
logger.info("β Draft discarded")
|
| 348 |
+
|
| 349 |
+
return {
|
| 350 |
+
"action": "discarded",
|
| 351 |
+
"message": messages.get(language, messages["en"]),
|
| 352 |
+
"state": {
|
| 353 |
+
"status": "idle",
|
| 354 |
+
"draft": None,
|
| 355 |
+
"images": [],
|
| 356 |
+
}
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# ========== 8. COMPLETE POST-DRAFT HANDLER ==========
|
| 361 |
+
async def handle_post_draft_action(
|
| 362 |
+
user_input: str,
|
| 363 |
+
draft: Dict,
|
| 364 |
+
user_id: str,
|
| 365 |
+
language: str = "en",
|
| 366 |
+
) -> Dict:
|
| 367 |
+
"""
|
| 368 |
+
Main handler for post-draft actions
|
| 369 |
+
|
| 370 |
+
Complete flow:
|
| 371 |
+
1. Detect action (publish/edit/discard/add_image)
|
| 372 |
+
2. Route to appropriate handler
|
| 373 |
+
3. Return result
|
| 374 |
+
"""
|
| 375 |
+
|
| 376 |
+
# Detect action
|
| 377 |
+
action = await detect_post_draft_action(user_input, language)
|
| 378 |
+
|
| 379 |
+
logger.info(f"π Handling post-draft action", action=action)
|
| 380 |
+
|
| 381 |
+
# ========== PUBLISH ==========
|
| 382 |
+
if action == "publish":
|
| 383 |
+
result = await publish_draft_to_mongodb(draft, user_id)
|
| 384 |
+
|
| 385 |
+
if result["success"]:
|
| 386 |
+
return {
|
| 387 |
+
"action": "published",
|
| 388 |
+
"success": True,
|
| 389 |
+
"mongo_id": result["mongo_id"],
|
| 390 |
+
"message": result["message"],
|
| 391 |
+
"next_state": {
|
| 392 |
+
"status": "published",
|
| 393 |
+
"draft": draft,
|
| 394 |
+
"mongo_id": result["mongo_id"],
|
| 395 |
+
}
|
| 396 |
+
}
|
| 397 |
+
else:
|
| 398 |
+
return {
|
| 399 |
+
"action": "publish_error",
|
| 400 |
+
"success": False,
|
| 401 |
+
"message": result["message"],
|
| 402 |
+
"draft": draft,
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
# ========== EDIT ==========
|
| 406 |
+
elif action == "edit":
|
| 407 |
+
result = await handle_edit_action(user_input, draft, language)
|
| 408 |
+
|
| 409 |
+
return {
|
| 410 |
+
"action": result["action"],
|
| 411 |
+
"message": result["message"],
|
| 412 |
+
"field": result.get("field"),
|
| 413 |
+
"draft": result["draft"],
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
# ========== DISCARD ==========
|
| 417 |
+
elif action == "discard":
|
| 418 |
+
result = handle_discard_action(language)
|
| 419 |
+
|
| 420 |
+
return {
|
| 421 |
+
"action": result["action"],
|
| 422 |
+
"message": result["message"],
|
| 423 |
+
"next_state": result["state"],
|
| 424 |
+
}
|
| 425 |
+
|
| 426 |
+
# ========== ADD IMAGE ==========
|
| 427 |
+
elif action == "add_image":
|
| 428 |
+
messages = {
|
| 429 |
+
"en": "πΈ Ready to upload more images. Please provide the image.",
|
| 430 |
+
"fr": "πΈ PrΓͺt Γ tΓ©lΓ©charger plus d'images. Veuillez fournir l'image.",
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
return {
|
| 434 |
+
"action": "add_image",
|
| 435 |
+
"message": messages.get(language, messages["en"]),
|
| 436 |
+
"draft": draft,
|
| 437 |
+
}
|
| 438 |
+
|
| 439 |
+
# ========== UNKNOWN ==========
|
| 440 |
+
else:
|
| 441 |
+
messages = {
|
| 442 |
+
"en": "Sorry, I didn't understand. What would you like to do?\nβ’ **Publish** - Post the listing\nβ’ **Edit** - Change something\nβ’ **Add image** - Upload more images\nβ’ **Discard** - Start over",
|
| 443 |
+
"fr": "DΓ©solΓ©, je n'ai pas compris. Que voulez-vous faire?\nβ’ **Publier** - Publier l'annonce\nβ’ **Γditer** - Modifier quelque chose\nβ’ **Ajouter image** - TΓ©lΓ©charger plus d'images\nβ’ **Annuler** - Recommencer",
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
return {
|
| 447 |
+
"action": "unclear",
|
| 448 |
+
"message": messages.get(language, messages["en"]),
|
| 449 |
+
"draft": draft,
|
| 450 |
+
}
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
# ========== TEST ==========
|
| 454 |
+
if __name__ == "__main__":
|
| 455 |
+
import asyncio
|
| 456 |
+
|
| 457 |
+
async def test():
|
| 458 |
+
print("\n" + "="*70)
|
| 459 |
+
print("π POST-DRAFT HANDLER TEST")
|
| 460 |
+
print("="*70 + "\n")
|
| 461 |
+
|
| 462 |
+
# Sample draft
|
| 463 |
+
draft = {
|
| 464 |
+
"title": "Beautiful 2-Bedroom Apartment in Lagos",
|
| 465 |
+
"description": "Spacious apartment in Lagos.",
|
| 466 |
+
"location": "Lagos",
|
| 467 |
+
"bedrooms": 2,
|
| 468 |
+
"bathrooms": 1,
|
| 469 |
+
"price": 50000,
|
| 470 |
+
"price_type": "monthly",
|
| 471 |
+
"listing_type": "rent",
|
| 472 |
+
"currency": "NGN",
|
| 473 |
+
"amenities": ["wifi", "parking"],
|
| 474 |
+
"images": ["https://cloudflare.com/img1.jpg"],
|
| 475 |
+
}
|
| 476 |
+
|
| 477 |
+
# Test 1: Publish
|
| 478 |
+
print("1οΈβ£ TEST: User says 'Publish'")
|
| 479 |
+
result = await handle_post_draft_action("Publish this listing", draft, "user123", "en")
|
| 480 |
+
print(f" Action: {result['action']}")
|
| 481 |
+
print(f" Message: {result['message']}\n")
|
| 482 |
+
|
| 483 |
+
# Test 2: Edit
|
| 484 |
+
print("2οΈβ£ TEST: User says 'Edit the price'")
|
| 485 |
+
result = await handle_post_draft_action("Change the price to 60k", draft, "user123", "en")
|
| 486 |
+
print(f" Action: {result['action']}")
|
| 487 |
+
print(f" Field: {result.get('field')}")
|
| 488 |
+
print(f" Message: {result['message']}")
|
| 489 |
+
if result['action'] == 'show_updated_preview':
|
| 490 |
+
print(f" New price: {result['draft']['price']}\n")
|
| 491 |
+
|
| 492 |
+
# Test 3: Discard
|
| 493 |
+
print("3οΈβ£ TEST: User says 'Discard'")
|
| 494 |
+
result = await handle_post_draft_action("Cancel this, start over", draft, "user123", "en")
|
| 495 |
+
print(f" Action: {result['action']}")
|
| 496 |
+
print(f" Message: {result['message']}\n")
|
| 497 |
+
|
| 498 |
+
asyncio.run(test())
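End to end, a chat route would hand the user's reply plus the saved draft to `handle_post_draft_action` and, after an edit, rebuild the preview before responding. A hypothetical sketch, not part of this commit, combining it with `generate_preview_text` from `draft_preview.py`; the wrapper name `handle_preview_reply` is an assumption:

```python
from app.ai.modules.post_draft_handler import handle_post_draft_action
from app.ai.modules.draft_preview import generate_preview_text

async def handle_preview_reply(user_input: str, draft: dict, user_id: str, language: str = "en") -> dict:
    result = await handle_post_draft_action(user_input, draft, user_id, language)

    if result["action"] == "show_updated_preview":
        # An edit was applied - regenerate the preview so the user can confirm
        result["preview_text"] = generate_preview_text(result["draft"], language)

    # For "published", result["mongo_id"] identifies the new listing document;
    # for "discarded", result["next_state"] resets the conversation state.
    return result
```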
|
app/ai/modules/property_listing.py
ADDED
|
@@ -0,0 +1,547 @@
| 1 |
+
# app/ai/modules/property_listing.py - Smart Property Listing with ML Integration
|
| 2 |
+
"""
|
| 3 |
+
Smart listing flow with:
|
| 4 |
+
1. Auto-detect listing_type (rent/short-stay/sale/roommate based on price_type)
|
| 5 |
+
2. Auto-detect price_type from keywords
|
| 6 |
+
3. Auto-detect currency from location using ML
|
| 7 |
+
4. Auto-generate title and description
|
| 8 |
+
5. LLM generates dynamic questions instead of hardcoded
|
| 9 |
+
6. User role-based validation (renter can only list roommate)
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import json
|
| 13 |
+
import re
|
| 14 |
+
from typing import Dict, List, Optional, Tuple
|
| 15 |
+
from openai import AsyncOpenAI
|
| 16 |
+
from structlog import get_logger
|
| 17 |
+
from app.config import settings
|
| 18 |
+
from app.ml.models.ml_listing_extractor import get_ml_extractor
|
| 19 |
+
|
| 20 |
+
logger = get_logger(__name__)
|
| 21 |
+
|
| 22 |
+
client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url=settings.DEEPSEEK_BASE_URL)
|
| 23 |
+
ml_extractor = get_ml_extractor()
|
| 24 |
+
|
| 25 |
+
# ========== REQUIRED FIELDS (only the essentials) ==========
|
| 26 |
+
# price_type and listing_type are auto-detected, not asked!
|
| 27 |
+
REQUIRED_FIELDS = ["location", "bedrooms", "bathrooms", "price"]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
# ========== 1. GENERATE EXAMPLE ==========
|
| 31 |
+
async def generate_listing_example(language: str) -> str:
|
| 32 |
+
"""
|
| 33 |
+
Generate a short, detailed example in user's language
|
| 34 |
+
Shows user what a complete listing looks like
|
| 35 |
+
"""
|
| 36 |
+
|
| 37 |
+
prompt = f"""Generate a SHORT but DETAILED example of how to list a property on Lojiz in {language.upper()}.
|
| 38 |
+
|
| 39 |
+
The example should include:
|
| 40 |
+
- Number of bedrooms and bathrooms
|
| 41 |
+
- Location (city)
|
| 42 |
+
- Price and frequency (per month, per night, weekly, etc)
|
| 43 |
+
- At least 2-3 amenities (wifi, parking, etc)
|
| 44 |
+
- A requirement or condition
|
| 45 |
+
|
| 46 |
+
Keep it to 2-3 sentences, natural language. Make it sound like a real user describing their property.
|
| 47 |
+
|
| 48 |
+
Format: Just the example text itself, no "Example:" prefix, no quotes around it."""
|
| 49 |
+
|
| 50 |
+
try:
|
| 51 |
+
response = await client.chat.completions.create(
|
| 52 |
+
model="deepseek-chat",
|
| 53 |
+
messages=[{"role": "user", "content": prompt}],
|
| 54 |
+
temperature=0.8,
|
| 55 |
+
max_tokens=200,
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
example = response.choices[0].message.content.strip()
|
| 59 |
+
logger.info("π Generated listing example", language=language)
|
| 60 |
+
|
| 61 |
+
intro = {
|
| 62 |
+
"en": "Sure! Here's an example of what you could say:",
|
| 63 |
+
"fr": "Bien sΓ»r! Voici un exemple de ce que vous pourriez dire:",
|
| 64 |
+
"es": "Β‘Claro! AquΓ hay un ejemplo de lo que podrΓas decir:",
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
return f"{intro.get(language, intro['en'])}\n\n\"{example}\""
|
| 68 |
+
|
| 69 |
+
except Exception as e:
|
| 70 |
+
logger.error("β Failed to generate example", exc_info=e)
|
| 71 |
+
return "Tell me about your property (location, bedrooms, bathrooms, price, amenities, etc.)"
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# ========== 2. EXTRACT & AUTO-DETECT FIELDS ==========
|
| 75 |
+
async def extract_and_auto_detect_fields(
|
| 76 |
+
user_input: str,
|
| 77 |
+
user_role: str,
|
| 78 |
+
language: str,
|
| 79 |
+
) -> Dict:
|
| 80 |
+
"""
|
| 81 |
+
Extract fields AND auto-detect:
|
| 82 |
+
- listing_type (from price_type keywords and user_role)
|
| 83 |
+
- price_type (from keywords: monthly, nightly, weekly, daily, yearly)
|
| 84 |
+
- currency (from location using ML)
|
| 85 |
+
|
| 86 |
+
Returns:
|
| 87 |
+
{
|
| 88 |
+
"location": "Lagos",
|
| 89 |
+
"bedrooms": 2,
|
| 90 |
+
"bathrooms": 1,
|
| 91 |
+
"price": 50000,
|
| 92 |
+
"price_type": "monthly", # AUTO-DETECTED
|
| 93 |
+
"listing_type": "rent", # AUTO-DETECTED
|
| 94 |
+
"currency": "NGN", # AUTO-DETECTED from location
|
| 95 |
+
"amenities": ["wifi", "parking"],
|
| 96 |
+
"requirements": "1-month deposit",
|
| 97 |
+
}
|
| 98 |
+
"""
|
| 99 |
+
|
| 100 |
+
# Step 1: Initial extraction
|
| 101 |
+
prompt = f"""Extract property listing details from this user input (in {language}):
|
| 102 |
+
|
| 103 |
+
"{user_input}"
|
| 104 |
+
|
| 105 |
+
Extract and return ONLY valid JSON:
|
| 106 |
+
{{
|
| 107 |
+
"location": "city name or null",
|
| 108 |
+
"bedrooms": number or null,
|
| 109 |
+
"bathrooms": number or null,
|
| 110 |
+
"price": number or null,
|
| 111 |
+
"price_type_raw": "raw text mentioning frequency (monthly, per night, weekly, daily, yearly, etc) or null",
|
| 112 |
+
"listing_type_raw": "raw text that might indicate type (for sale, for rent, short stay, sharing, etc) or null",
|
| 113 |
+
"amenities": ["list", "of", "amenities"] or [],
|
| 114 |
+
"requirements": "string or null"
|
| 115 |
+
}}
|
| 116 |
+
|
| 117 |
+
Rules:
|
| 118 |
+
- Extract ONLY what's explicitly mentioned
|
| 119 |
+
- price_type_raw: capture the exact frequency words (e.g., "per month", "monthly", "nightly", "per night", "daily", "weekly", "yearly")
|
| 120 |
+
- For numbers, extract numeric value only
|
| 121 |
+
- Amenities: list as lowercase strings
|
| 122 |
+
- If a field not mentioned, set to null or []
|
| 123 |
+
|
| 124 |
+
Return ONLY JSON."""
|
| 125 |
+
|
| 126 |
+
try:
|
| 127 |
+
response = await client.chat.completions.create(
|
| 128 |
+
model="deepseek-chat",
|
| 129 |
+
messages=[{"role": "user", "content": prompt}],
|
| 130 |
+
temperature=0.1,
|
| 131 |
+
max_tokens=300,
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
raw = response.choices[0].message.content.strip()
|
| 135 |
+
raw = re.sub(r'```json\s*|\s*```', '', raw)
|
| 136 |
+
extracted = json.loads(raw)
|
| 137 |
+
|
| 138 |
+
logger.info("✅ Initial extraction done", extracted=extracted)
|
| 139 |
+
|
| 140 |
+
except Exception as e:
|
| 141 |
+
logger.error("β Extraction failed", exc_info=e)
|
| 142 |
+
return {}
|
| 143 |
+
|
| 144 |
+
# Step 2: Auto-detect price_type from keywords
|
| 145 |
+
price_type_raw = extracted.get("price_type_raw", "").lower()
|
| 146 |
+
price_type = "monthly" # Default
|
| 147 |
+
|
| 148 |
+
if any(kw in price_type_raw for kw in ["night", "nightly", "per night", "daily", "day", "week", "weekly"]):
|
| 149 |
+
price_type = "nightly" if any(k in price_type_raw for k in ["night", "nightly"]) else "weekly" if "week" in price_type_raw else "daily"
|
| 150 |
+
elif any(kw in price_type_raw for kw in ["year", "yearly", "annum", "per year"]):
|
| 151 |
+
price_type = "yearly"
|
| 152 |
+
elif any(kw in price_type_raw for kw in ["month", "monthly", "mth", "per month"]):
|
| 153 |
+
price_type = "monthly"
|
| 154 |
+
|
| 155 |
+
logger.info(f"π Auto-detected price_type", price_type_raw=price_type_raw, price_type=price_type)
|
| 156 |
+
|
| 157 |
+
# Step 3: Auto-detect listing_type based on price_type + user_role + keywords
|
| 158 |
+
listing_type = None
|
| 159 |
+
|
| 160 |
+
# If renter, ALWAYS roommate
|
| 161 |
+
if user_role == "renter":
|
| 162 |
+
listing_type = "roommate"
|
| 163 |
+
logger.info("π User is renter β listing_type: roommate")
|
| 164 |
+
else:
|
| 165 |
+
# Landlord: infer from price_type and keywords
|
| 166 |
+
if price_type in ["nightly", "daily", "weekly"]:
|
| 167 |
+
listing_type = "short-stay"
|
| 168 |
+
elif price_type == "yearly" or "for sale" in extracted.get("listing_type_raw", "").lower():
|
| 169 |
+
listing_type = "sale"
|
| 170 |
+
else:
|
| 171 |
+
listing_type = "rent" # Default for landlord
|
| 172 |
+
|
| 173 |
+
logger.info(f"π User is landlord β listing_type: {listing_type}")
|
| 174 |
+
|
| 175 |
+
# Step 4: Auto-detect currency from location using ML
|
| 176 |
+
currency = "XOF" # Default
|
| 177 |
+
location = extracted.get("location")
|
| 178 |
+
|
| 179 |
+
if location:
|
| 180 |
+
try:
|
| 181 |
+
currency, country, city, confidence = await ml_extractor.currency_mgr.get_currency_for_location(location)
|
| 182 |
+
if not currency:
|
| 183 |
+
currency = "XOF"
|
| 184 |
+
logger.info(f"π± Auto-detected currency", location=location, currency=currency, confidence=confidence)
|
| 185 |
+
except Exception as e:
|
| 186 |
+
logger.warning(f"β οΈ Currency detection failed, using default", exc_info=e)
|
| 187 |
+
|
| 188 |
+
# Return complete extracted data
|
| 189 |
+
result = {
|
| 190 |
+
"location": extracted.get("location"),
|
| 191 |
+
"bedrooms": extracted.get("bedrooms"),
|
| 192 |
+
"bathrooms": extracted.get("bathrooms"),
|
| 193 |
+
"price": extracted.get("price"),
|
| 194 |
+
"price_type": price_type, # β
AUTO-DETECTED
|
| 195 |
+
"listing_type": listing_type, # β
AUTO-DETECTED
|
| 196 |
+
"currency": currency, # β
AUTO-DETECTED
|
| 197 |
+
"amenities": extracted.get("amenities", []),
|
| 198 |
+
"requirements": extracted.get("requirements"),
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
logger.info("β
All fields auto-detected", result=result)
|
| 202 |
+
return result
|
| 203 |
+
|
| 204 |
+
|
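# --- Illustrative sketch, not part of this module ---------------------------
# The Step 2/3 keyword rules above can be exercised offline without the LLM
# call. This standalone helper only mirrors those rules; its name and the
# sample inputs below are hypothetical.
def _infer_price_and_listing_type(price_type_raw, user_role):
    raw = (price_type_raw or "").lower()
    if any(k in raw for k in ("night", "nightly")):
        price_type = "nightly"
    elif "week" in raw:
        price_type = "weekly"
    elif any(k in raw for k in ("daily", "day")):
        price_type = "daily"
    elif any(k in raw for k in ("year", "yearly", "annum")):
        price_type = "yearly"
    else:
        price_type = "monthly"

    if user_role == "renter":
        listing_type = "roommate"        # renters may only post roommate listings
    elif price_type in ("nightly", "daily", "weekly"):
        listing_type = "short-stay"
    elif price_type == "yearly":
        listing_type = "sale"
    else:
        listing_type = "rent"
    return price_type, listing_type

# Expected behaviour, matching the rules above:
# _infer_price_and_listing_type("per night", "landlord") -> ("nightly", "short-stay")
# _infer_price_and_listing_type("monthly", "renter")     -> ("monthly", "roommate")
# -----------------------------------------------------------------------------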
# ========== 3. AUTO-GENERATE TITLE ==========
async def generate_title(state: Dict) -> str:
    """
    Generate a catchy but short title with location

    Examples:
    - "Cozy 2-Bed Apartment in Lagos"
    - "Modern 3-Bed Rental in Cotonou"
    - "Luxury Penthouse in Abuja"
    """

    bedrooms = state.get("bedrooms", "?")
    location = state.get("location", "Unknown")
    listing_type = state.get("listing_type", "property")

    prompt = f"""Generate a SHORT, CATCHY title for a real estate listing.

Details:
- {bedrooms} bedrooms
- Located in {location}
- Type: {listing_type}

Title should be:
- Under 60 characters
- Include location
- Sound professional but friendly
- One line only

Return ONLY the title, no quotes, no explanation."""

    try:
        response = await client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=100,
        )

        title = response.choices[0].message.content.strip()
        logger.info("β¨ Generated title", title=title)
        return title

    except Exception as e:
        logger.error("β Title generation failed", exc_info=e)
        return f"{bedrooms}-Bedroom {listing_type.title()} in {location}"


# ========== 4. AUTO-GENERATE DESCRIPTION ==========
async def generate_description(state: Dict) -> str:
    """
    Generate a detailed, well-written description

    Should include:
    - Number of bedrooms/bathrooms
    - Location
    - Key amenities
    - Price info
    - Requirements if any
    """

    bedrooms = state.get("bedrooms", "?")
    bathrooms = state.get("bathrooms", "?")
    location = state.get("location", "Unknown")
    price = state.get("price", "?")
    price_type = state.get("price_type", "monthly")
    amenities = ", ".join(state.get("amenities", []))
    requirements = state.get("requirements", "")
    listing_type = state.get("listing_type", "property")
    currency = state.get("currency", "")

    amenities_text = f"Amenities include {amenities}." if amenities else ""
    requirements_text = f"Requirements: {requirements}" if requirements else ""

    prompt = f"""Write a professional, detailed description for a real estate listing.

Property Details:
- Bedrooms: {bedrooms}
- Bathrooms: {bathrooms}
- Location: {location}
- Price: {price} {price_type} ({currency})
- Type: {listing_type}
- Amenities: {amenities_text}
- {requirements_text}

Write 3-4 sentences that:
- Sound professional but welcoming
- Highlight the property's appeal
- Include all key details
- Are clear and easy to read

Return ONLY the description, no quotes."""

    try:
        response = await client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=300,
        )

        description = response.choices[0].message.content.strip()
        logger.info("π Generated description", length=len(description))
        return description

    except Exception as e:
        logger.error("β Description generation failed", exc_info=e)
        return f"Beautiful {bedrooms}-bedroom {listing_type} in {location}. Perfect for families or professionals."

# ========== 5. GENERATE SMART QUESTION ==========
async def generate_smart_question(
    missing_field: str,
    language: str,
    context: Optional[str] = None,
) -> str:
    """
    LLM generates a natural question for missing field
    NOT hardcoded - AI decides how to ask
    """

    prompt = f"""You are Aida, a friendly real estate assistant.

Generate a NATURAL, FRIENDLY question to ask the user about a missing property detail.

Missing field: {missing_field}
Language: {language}
Context: {context or "General property listing"}

The question should:
- Be SHORT and DIRECT (one sentence)
- Sound conversational, not robotic
- Be in {language}
- NOT have a prefix like "User:" or "Question:"

Return ONLY the question, nothing else."""

    try:
        response = await client.chat.completions.create(
            model="deepseek-chat",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=100,
        )

        question = response.choices[0].message.content.strip()
        logger.info("β Generated smart question", field=missing_field, question=question)
        return question

    except Exception as e:
        logger.error("β Question generation failed", exc_info=e)
        return f"What is the {missing_field}?"


# ========== 6. FIND MISSING FIELDS ==========
def find_missing_fields(state: Dict) -> List[str]:
    """Find which required fields are missing"""
    missing = [f for f in REQUIRED_FIELDS if not state.get(f)]
    logger.info("π Missing fields", missing=missing)
    return missing

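# Example (illustrative only): assuming REQUIRED_FIELDS, defined earlier in
# this module, covers roughly location, bedrooms, bathrooms, price,
# listing_type and price_type, a partially filled state such as
#   {"location": "lagos", "bedrooms": 2, "price": 50000}
# would make find_missing_fields() return the remaining keys, e.g.
#   ["bathrooms", "listing_type", "price_type"]
# and continue_listing() below then asks about the first of them.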
# ========== 7. BUILD LISTING STATE ==========
async def build_listing_state(
    user_id: str,
    user_role: str,
    language: str,
    user_input: Optional[str] = None,
    current_state: Optional[Dict] = None,
) -> Dict:
    """
    Build or update listing state with auto-detection
    """

    # Start with current state or empty
    state = current_state.copy() if current_state else {}

    # If user provided input, extract and auto-detect fields
    if user_input:
        extracted = await extract_and_auto_detect_fields(
            user_input=user_input,
            user_role=user_role,
            language=language,
        )

        # Merge extracted fields into state (only update non-null values)
        for field, value in extracted.items():
            if value is not None and value != [] and value != "":
                state[field] = value

    # Find missing required fields
    missing_fields = find_missing_fields(state)

    # Prepare state dict
    listing_state = {
        "user_id": user_id,
        "user_role": user_role,
        "language": language,
        "status": "collecting",
        "provided_fields": {k: v for k, v in state.items() if k in REQUIRED_FIELDS},
        "missing_fields": missing_fields,
        "amenities": state.get("amenities", []),
        "requirements": state.get("requirements"),
        "raw_state": state,
    }

    return listing_state

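# Illustrative two-turn flow (not executed here): fields accumulate in
# raw_state across calls, so a follow-up answer only needs to add what is new.
#   turn 1: build_listing_state(..., user_input="2-bed flat in Lagos, 50k monthly")
#           -> provided_fields holds location/bedrooms/price, missing e.g. ["bathrooms"]
#   turn 2: build_listing_state(..., user_input="1 bathroom",
#                               current_state=<raw_state from turn 1>)
#           -> earlier values are kept, "bathrooms" is merged in, missing becomes []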
# ========== 8. COMPLETE LISTING FLOW ==========
async def start_listing(
    user_id: str,
    user_role: str,
    language: str,
) -> Dict:
    """Start listing - show example and wait for input"""

    example = await generate_listing_example(language)

    logger.info("π Started listing flow", user_id=user_id, user_role=user_role)

    return {
        "action": "show_example_and_wait",
        "example": example,
        "state": {
            "user_id": user_id,
            "user_role": user_role,
            "language": language,
            "status": "awaiting_initial_input",
            "provided_fields": {},
            "missing_fields": REQUIRED_FIELDS.copy(),
        }
    }


async def continue_listing(
    user_input: str,
    current_state: Dict,
) -> Dict:
    """
    User provided input - extract fields and ask for missing ones
    """

    user_id = current_state.get("user_id")
    user_role = current_state.get("user_role")
    language = current_state.get("language", "en")

    # Build updated state
    updated_state = await build_listing_state(
        user_id=user_id,
        user_role=user_role,
        language=language,
        user_input=user_input,
        current_state=current_state.get("raw_state", {}),
    )

    missing_fields = updated_state["missing_fields"]

    if missing_fields:
        # Generate smart question for next field
        next_field = missing_fields[0]
        question = await generate_smart_question(next_field, language)

        logger.info("π Generated smart question", field=next_field)

        return {
            "action": "ask_next_field",
            "question": question,
            "state": updated_state,
        }
    else:
        # All required fields complete - ask about optional fields
        optional_msg = await generate_smart_question("amenities and requirements", language, "Optional property details")

        logger.info("β All required fields collected")

        return {
            "action": "ask_optional_fields",
            "message": optional_msg,
            "state": updated_state,
        }


async def generate_draft(current_state: Dict) -> Dict:
    """
    All fields collected - generate title, description, and draft preview
    """

    state = current_state.get("raw_state", {})

    # Generate title and description
    title = await generate_title(state)
    description = await generate_description(state)

    draft = {
        **state,
        "title": title,
        "description": description,
        "status": "draft",
    }

    logger.info("π Draft generated", title=title[:50])

    return {
        "action": "show_draft_preview",
        "draft": draft,
        "state": {**current_state, "draft": draft}
    }


# ========== TEST ==========
if __name__ == "__main__":
    import asyncio

    async def test():
        print("\n" + "="*70)
        print("π§ͺ SMART PROPERTY LISTING TEST")
        print("="*70 + "\n")

        # Test 1: Start listing
        print("π TEST 1: Start listing")
        result1 = await start_listing(user_id="user123", user_role="landlord", language="en")
        print(f"β Example:\n{result1['example']}\n")

        # Test 2: User provides input
        print("π TEST 2: User provides property details")
        user_input = "I have a 2-bed, 1-bath apartment in Lagos for 50k monthly with wifi and parking"
        result2 = await continue_listing(
            user_input=user_input,
            current_state=result1["state"]
        )
        print(f"β Action: {result2['action']}")
        print(f"β State: {result2['state']['provided_fields']}")
        print(f"β Missing: {result2['state']['missing_fields']}")
        print(f"β Question: {result2['question']}\n")

        # Test 3: Generate draft
        print("π TEST 3: Generate draft")
        result2['state']['raw_state']['location'] = 'Lagos'  # Assuming extracted
        result3 = await generate_draft(result2["state"])
        print(f"β Draft Title: {result3['draft']['title']}")
        print(f"β Draft Description: {result3['draft']['description']}\n")

    asyncio.run(test())

app/ai/nodes/__init__.py
DELETED
File without changes

app/ai/nodes/__pycache__/__init__.cpython-312.pyc
DELETED
Binary file (181 Bytes)

app/ai/nodes/__pycache__/__init__.cpython-313.pyc
DELETED
Binary file (186 Bytes)

app/ai/nodes/__pycache__/discard_node.cpython-313.pyc
DELETED
Binary file (983 Bytes)

app/ai/nodes/__pycache__/draft_node.cpython-312.pyc
DELETED
Binary file (7.53 kB)

app/ai/nodes/__pycache__/draft_node.cpython-313.pyc
DELETED
Binary file (10.1 kB)

app/ai/nodes/__pycache__/edit_node.cpython-313.pyc
DELETED
Binary file (2.07 kB)

app/ai/nodes/__pycache__/image_node.cpython-312.pyc
DELETED
Binary file (3.53 kB)

app/ai/nodes/__pycache__/intent_node.cpython-312.pyc
DELETED
Binary file (13.6 kB)

app/ai/nodes/__pycache__/intent_node.cpython-313.pyc
DELETED
Binary file (15.1 kB)

app/ai/nodes/__pycache__/publish_node.cpython-312.pyc
DELETED
Binary file (4.56 kB)

app/ai/nodes/__pycache__/publish_node.cpython-313.pyc
DELETED
Binary file (2.81 kB)

app/ai/nodes/__pycache__/role_gate_node.cpython-312.pyc
DELETED
Binary file (2.05 kB)

app/ai/nodes/__pycache__/role_gate_node.cpython-313.pyc
DELETED
Binary file (2.06 kB)

app/ai/nodes/__pycache__/search_node.cpython-312.pyc
DELETED
Binary file (12.5 kB)

app/ai/nodes/__pycache__/search_node.cpython-313.pyc
DELETED
Binary file (14.4 kB)

app/ai/nodes/discard_node.py
DELETED
@@ -1,19 +0,0 @@
# app/ai/nodes/discard_node.py β CLEAR DRAFT AND RESET
from typing import Dict
from structlog import get_logger

logger = get_logger(__name__)

async def discard_node(state: Dict) -> Dict:
    # wipe listing-specific data
    for key in ["draft", "draft_preview", "location", "bedrooms", "bathrooms",
                "price", "listing_type", "price_type", "amenities", "requirements",
                "missing_fields", "editing_field"]:
        state.pop(key, None)

    state.update({
        "status": "discarded",
        "ai_reply": "Brouillon supprimé. Que souhaitez-vous faire ensuite ?",
    })
    logger.info("ποΈ Draft discarded")
    return state

app/ai/nodes/draft_node.py
DELETED
@@ -1,275 +0,0 @@
# app/ai/nodes/draft_node.py - COMPLETE WORKING VERSION
import re
from typing import Dict, List
from structlog import get_logger

logger = get_logger(__name__)

# ============================================
# AMENITY ICONS
# ============================================
AMENITY_ICONS = {
    "wifi": "πΆ", "parking": "πΏοΈ", "furnished": "ποΈ", "washing machine": "π§Ό",
    "washing": "π§Ό", "dryer": "π₯", "balcony": "ποΈ", "pool": "π", "gym": "πͺ",
    "garden": "πΏ", "air conditioning": "βοΈ", "ac": "βοΈ", "kitchen": "π³",
    "security": "π", "elevator": "π", "laundry": "π§Ί", "heating": "π₯", "hot water": "πΏ",
}


def add_amenity_icons(amenities: List[str]) -> str:
    """Convert amenity list to emoji format"""
    if not amenities:
        return ""

    icons_text = []
    for amenity in amenities:
        amenity_lower = amenity.lower().strip()
        icon = AMENITY_ICONS.get(amenity_lower, "β")
        icons_text.append(f"{icon} {amenity}")

    return " | ".join(icons_text)


def generate_title(state: Dict) -> str:
    """Generate professional title"""
    bedrooms = state.get("bedrooms", "N/A")
    location = state.get("location", "N/A").title()
    listing_type = state.get("listing_type", "rental").title()
    return f"{bedrooms}-Bedroom {listing_type} in {location}"


def generate_description(state: Dict) -> str:
    """Generate professional listing description"""
    bedrooms = state.get("bedrooms")
    bathrooms = state.get("bathrooms")
    location = state.get("location", "").title()
    price = state.get("price", 0)
    price_type = state.get("price_type", "monthly")
    currency = state.get("currency", "XOF")
    amenities = state.get("amenities", [])
    requirements = state.get("requirements")

    parts = []
    parts.append(f"Spacious {bedrooms}-bedroom, {bathrooms}-bathroom property in {location}.")

    if amenities:
        amenities_str = ", ".join(amenities)
        parts.append(f"Features include: {amenities_str}.")

    parts.append(f"Price: {price:,.0f} {currency} per {price_type}.")

    if requirements:
        if "deposit" in requirements.lower() or "require" in requirements.lower():
            parts.append("Special requirements apply.")

    return " ".join(parts)


async def draft_node(state: Dict) -> Dict:
    """Process draft creation, preview, edits, and actions"""

    current_status = state.get("status", "greeting")
    logger.info(f"π Draft Node", status=current_status)

    # ===== COLLECTING / CHECKING_OPTIONAL =====
    if current_status in ["collecting", "checking_optional"]:
        from app.ai.nodes.intent_node import extract_amenities, get_missing_fields

        new_amenities = extract_amenities(state["messages"][-1]["content"])
        if new_amenities:
            state["amenities"] = list(set(state.get("amenities", []) + new_amenities))

        user_msg = state["messages"][-1]["content"].lower()
        if any(w in user_msg for w in ["require", "deposit", "condition", "no ", "must"]):
            state["requirements"] = state["messages"][-1]["content"]

        missing = get_missing_fields(state)
        state["missing_fields"] = missing

        if not missing and current_status == "checking_optional":
            state["status"] = "draft_ready"

        return state

    # ===== GENERATE DRAFT PREVIEW =====
    if current_status == "draft_ready":
        logger.info("π Generating draft preview")

        title = generate_title(state)
        description = generate_description(state)
        amenities_with_icons = add_amenity_icons(state.get("amenities", []))

        draft_preview = {
            "title": title,
            "description": description,
            "location": state.get("location"),
            "bedrooms": state.get("bedrooms"),
            "bathrooms": state.get("bathrooms"),
            "price": state.get("price"),
            "price_type": state.get("price_type"),
            "currency": state.get("currency", "XOF"),
            "listing_type": state.get("listing_type"),
            "amenities": state.get("amenities", []),
            "amenities_with_icons": amenities_with_icons,
            "requirements": state.get("requirements"),
            "images": state.get("image_urls", []),
        }

        state["draft_preview"] = draft_preview

        images_section = f"π· {len(draft_preview['images'])} images" if draft_preview['images'] else "π· No images"

        preview_text = f"""
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
π {draft_preview['title']}
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

π Location: {draft_preview['location']}
ποΈ Bedrooms: {draft_preview['bedrooms']}
πΏ Bathrooms: {draft_preview['bathrooms']}
π° Price: {draft_preview['price']:,.0f} {draft_preview['currency']} per {draft_preview['price_type']}

{draft_preview['description']}

β¨ Amenities: {draft_preview['amenities_with_icons'] if draft_preview['amenities_with_icons'] else 'None specified'}
{images_section}

βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

Ready? Say 'publish' to make it live, 'edit' to change, or 'discard' to start over.
"""

        state["status"] = "preview_shown"
        state["ai_reply"] = preview_text
        logger.info("β Preview generated")

    # ===== PREVIEW SHOWN - WAITING FOR ACTION =====
    elif current_status == "preview_shown":
        from app.ai.nodes.intent_node import translate_to_language

        last_msg = state["messages"][-1]["content"].lower()

        if any(k in last_msg for k in ["publish", "publier", "go live", "post", "confirm", "yes", "ok", "okay"]):
            state["status"] = "publishing"
            publish_msg = "Publishing your listing..."
            state["ai_reply"] = await translate_to_language(publish_msg, state.get("user_language", "English"))
            logger.info("π€ Publishing triggered")
            return state

        if any(k in last_msg for k in ["edit", "change", "modifier", "update", "correction", "fix"]):
            state["status"] = "editing"
            edit_msg = "What would you like to change? (e.g., price, bedrooms, location, amenities)"
            state["ai_reply"] = await translate_to_language(edit_msg, state.get("user_language", "English"))
            logger.info("βοΈ Edit mode triggered")
            return state

        if any(k in last_msg for k in ["discard", "delete", "cancel", "annuler", "remove", "start over"]):
            state["status"] = "discarded"
            for key in ["location", "bedrooms", "bathrooms", "price", "listing_type", "price_type", "amenities", "requirements", "draft_preview", "image_urls"]:
                state.pop(key, None)
            discard_msg = "β Draft cleared. What would you like to do next?"
            state["ai_reply"] = await translate_to_language(discard_msg, state.get("user_language", "English"))
            logger.info("ποΈ Draft discarded")
            return state

        state["ai_reply"] = "You can say 'publish' to list it, 'edit' to change something, or 'discard' to start over."

    # ===== EDITING =====
    elif current_status == "editing":
        from app.ai.nodes.intent_node import extract_number, extract_location

        last_msg = state["messages"][-1]["content"]

        if "price" in last_msg.lower():
            new_price = extract_number(last_msg)
            if new_price:
                state["price"] = new_price
                state["ai_reply"] = f"β Price updated to {new_price:,.0f}! Here's the updated preview:"

        elif "bedroom" in last_msg.lower() or "bed" in last_msg.lower():
            nums = re.findall(r'(\d+)', last_msg)
            if nums:
                state["bedrooms"] = int(nums[0])
                state["ai_reply"] = f"β Bedrooms updated to {nums[0]}! Here's the updated preview:"

        elif "bathroom" in last_msg.lower() or "bath" in last_msg.lower():
            nums = re.findall(r'(\d+)', last_msg)
            if nums:
                state["bathrooms"] = int(nums[0])
                state["ai_reply"] = f"β Bathrooms updated to {nums[0]}! Here's the updated preview:"

        elif "location" in last_msg.lower() or "city" in last_msg.lower():
            new_location = extract_location(last_msg)
            if new_location:
                state["location"] = new_location
                try:
                    from app.ml.models.ml_listing_extractor import get_ml_extractor
                    ml = get_ml_extractor()
                    currency, country, city, conf = await ml.infer_currency(state)
                    if currency:
                        state["currency"] = currency
                except:
                    pass
                state["ai_reply"] = f"β Location updated to {new_location}! Here's the updated preview:"

        elif "amenities" in last_msg.lower():
            from app.ai.nodes.intent_node import extract_amenities
            new_amenities = extract_amenities(last_msg)
            if new_amenities:
                state["amenities"] = list(set(state.get("amenities", []) + new_amenities))
                state["ai_reply"] = f"β Amenities updated! Here's the updated preview:"

        else:
            state["ai_reply"] = "I didn't understand. Please say 'price', 'location', 'bedrooms', 'bathrooms', or 'amenities' and the new value."
            return state

        # Regenerate preview
        title = generate_title(state)
        description = generate_description(state)
        amenities_with_icons = add_amenity_icons(state.get("amenities", []))

        draft_preview = {
            "title": title,
            "description": description,
            "location": state.get("location"),
            "bedrooms": state.get("bedrooms"),
            "bathrooms": state.get("bathrooms"),
            "price": state.get("price"),
            "price_type": state.get("price_type"),
            "currency": state.get("currency", "XOF"),
            "listing_type": state.get("listing_type"),
            "amenities": state.get("amenities", []),
            "amenities_with_icons": amenities_with_icons,
            "requirements": state.get("requirements"),
            "images": state.get("image_urls", []),
        }

        state["draft_preview"] = draft_preview

        images_section = f"π· {len(draft_preview['images'])} images" if draft_preview['images'] else "π· No images"

        preview_text = f"""
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
π {draft_preview['title']}
ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

π Location: {draft_preview['location']}
ποΈ Bedrooms: {draft_preview['bedrooms']}
πΏ Bathrooms: {draft_preview['bathrooms']}
π° Price: {draft_preview['price']:,.0f} {draft_preview['currency']} per {draft_preview['price_type']}

{draft_preview['description']}

β¨ Amenities: {draft_preview['amenities_with_icons'] if draft_preview['amenities_with_icons'] else 'None specified'}
{images_section}

βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ

Ready? Say 'publish' or 'edit' again, or 'discard'.
"""

        state["status"] = "preview_shown"
        state["ai_reply"] = preview_text
        logger.info("π Preview regenerated")

    return state

app/ai/nodes/edit_node.py
DELETED
@@ -1,36 +0,0 @@
# app/ai/nodes/edit_node.py β HANDLE USER EDIT REQUESTS
from typing import Dict
from structlog import get_logger

logger = get_logger(__name__)

_KEYWORDS_MAP = {
    "location": {"location", "where", "lieu", "emplacement", "adresse"},
    "price": {"price", "prix", "cost", "coût", "loyer"},
    "bedrooms": {"bedroom", "chambre", "piece", "pièce"},
    "bathrooms": {"bathroom", "salle de bain", "toilet", "douche"},
    "amenities": {"amenity", "equipment", "équipement", "wifi", "parking"},
    "requirements": {"requirement", "condition", "exigence"},
}

def _detect_field_to_edit(text: str) -> str | None:
    txt = text.lower()
    for field, keys in _KEYWORDS_MAP.items():
        if any(k in txt for k in keys):
            return field
    return None

async def edit_node(state: Dict) -> Dict:
    last = state["messages"][-1]["content"]
    field = _detect_field_to_edit(last)

    if field:
        state["editing_field"] = field
        state["status"] = "editing"
        state["ai_reply"] = f"What is the new value for **{field}**?"
    else:
        state["status"] = "editing"
        state["ai_reply"] = "Which field would you like to change? (e.g. 'price', 'location', etc.)"

    logger.info("βοΈ Draft edit requested", field=field)
    return state

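The removed edit_node relied on the keyword map above to decide which draft field a free-text edit request targets. A minimal standalone sketch of that lookup (the map is trimmed and the sample phrases are hypothetical):

# sketch of the keyword lookup used by the removed edit_node
KEYWORDS = {
    "price": {"price", "prix", "cost", "loyer"},
    "bedrooms": {"bedroom", "chambre"},
    "location": {"location", "adresse", "emplacement"},
}

def detect_field(text: str):
    txt = text.lower()
    for field, keys in KEYWORDS.items():
        if any(k in txt for k in keys):
            return field
    return None

print(detect_field("can you change the price to 60k"))  # -> "price"
print(detect_field("make it nicer"))                     # -> None (node asks which field)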
app/ai/nodes/image_node.py
DELETED
@@ -1,63 +0,0 @@
# app/ai/nodes/image_node.py
import base64
import mimetypes
from typing import Dict
import httpx
from app.config import settings
from structlog import get_logger

logger = get_logger(__name__)
CF_UPLOAD_URL = f"https://api.cloudflare.com/client/v4/accounts/{settings.CF_ACCOUNT_ID}/images/v1"

async def image_node(state: Dict) -> Dict:
    """
    LangGraph node:
    - expects state["image"] = {"mime": "image/jpeg", "data": "<base64>"}
    - uploads to Cloudflare Images -> public URL
    - appends URL to draft.images[]
    - replies with confirmation
    """
    image = state.get("image")
    if not image:
        return state

    mime_type = image["mime"]
    base64_data = image["data"]
    ext = mimetypes.guess_extension(mime_type) or ".jpg"
    filename = f"upload{ext}"

    try:
        binary = base64.b64decode(base64_data)
    except Exception as e:
        logger.warning("Bad base64 image", exc_info=e)
        state["ai_reply"] = "β Invalid image data. Please try again."
        return state

    headers = {"Authorization": f"Bearer {settings.CF_API_TOKEN}"}
    files = {"file": (filename, binary, mime_type)}

    async with httpx.AsyncClient() as client:
        try:
            resp = await client.post(CF_UPLOAD_URL, files=files, headers=headers, timeout=30)
            resp.raise_for_status()
            data = resp.json()
            if not data.get("success"):
                raise ValueError(data.get("errors"))
            public_url = f"https://imagedelivery.net/{data['result']['id']}/public"
        except Exception as e:
            logger.error("Cloudflare upload failed", exc_info=e)
            state["ai_reply"] = "β Could not upload image. Please try again."
            return state

    # append URL to draft
    draft = state.get("draft")
    if draft:
        if not hasattr(draft, "images"):
            draft.images = []
        draft.images.append(public_url)
        state["ai_reply"] = f"πΈ Picture uploaded! Add more or say **publish** when ready.\n{public_url}"
    else:
        state["ai_reply"] = "πΈ Picture uploaded! Continue describing the property."

    logger.info("Image uploaded to Cloudflare", url=public_url)
    return state

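For reference, the payload this node expected in state["image"] can be built client-side with the standard library; this is only a sketch and the local file path is hypothetical:

import base64, mimetypes

def build_image_payload(path: str) -> dict:
    # Shape expected by the old image_node: {"mime": "...", "data": "<base64>"}
    mime, _ = mimetypes.guess_type(path)
    with open(path, "rb") as f:
        data = base64.b64encode(f.read()).decode("ascii")
    return {"mime": mime or "image/jpeg", "data": data}

# state["image"] = build_image_payload("photo.jpg")  # hypothetical local file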
app/ai/nodes/intent_node.py
DELETED
@@ -1,423 +0,0 @@
# app/ai/nodes/intent_node.py - COMPLETE WORKING VERSION
import json
import re
import os
from typing import Dict, List, Optional
from tenacity import retry, stop_after_attempt, wait_exponential
from openai import AsyncOpenAI
from structlog import get_logger
from app.config import settings

logger = get_logger(__name__)

# ============================================
# DeepSeek Configuration
# ============================================
MODEL = "deepseek-chat"
MAX_TOKENS = 1000
TEMP = 0

client = AsyncOpenAI(
    api_key=settings.DEEPSEEK_API_KEY,
    base_url=settings.DEEPSEEK_BASE_URL,
)

# ============================================
# ML EXTRACTOR - Robust extraction
# ============================================
try:
    from app.ml.models.ml_listing_extractor import get_ml_extractor
    ml_extractor = get_ml_extractor()
    ML_AVAILABLE = True
    logger.info("β ML Extractor available")
except Exception as e:
    ml_extractor = None
    ML_AVAILABLE = False
    logger.warning(f"β οΈ ML Extractor not available: {e}")


# ============================================
# LANGUAGE DETECTION - Using LLM
# ============================================
async def detect_language_with_llm(text: str) -> str:
    """Detect user's language using DeepSeek LLM"""
    try:
        response = await client.chat.completions.create(
            model=MODEL,
            messages=[
                {
                    "role": "user",
                    "content": f"What language is this text written in? Reply with ONLY the language name:\n\n{text[:200]}"
                }
            ],
            temperature=0,
            max_tokens=20,
        )
        language = response.choices[0].message.content.strip()
        logger.info(f"π Language detected: {language}")
        return language
    except Exception as e:
        logger.warning(f"β οΈ Language detection failed: {e}")
        return "English"


# ============================================
# TRANSLATE TEXT - Using LLM
# ============================================
async def translate_to_language(text: str, language: str) -> str:
    """Translate text to user's language"""
    if language.lower() == "english":
        return text

    try:
        response = await client.chat.completions.create(
            model=MODEL,
            messages=[
                {
                    "role": "user",
                    "content": f"Translate this to {language} language. Reply with ONLY the translation:\n\n{text}"
                }
            ],
            temperature=0,
            max_tokens=300,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.warning(f"β οΈ Translation failed: {e}")
        return text


# ============================================
# GENERATE EXAMPLE - Using LLM
# ============================================
async def generate_listing_example(language: str) -> str:
    """Generate property listing example in user's language"""
    try:
        response = await client.chat.completions.create(
            model=MODEL,
            messages=[
                {
                    "role": "user",
                    "content": f"""Generate a helpful property listing example in {language} language.

Show them an example of what they could say when listing a property.
Include: bedrooms, bathrooms, location, price, amenities, and requirements.
Make it friendly and realistic.
End with: "Tell me about your property! π "

Start with: "π Here's how to list a property:"
"""
                }
            ],
            temperature=0.3,
            max_tokens=400,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.warning(f"β οΈ Example generation failed: {e}")
        return f"π Tell me about the property you want to list!"


# ============================================
# FIELD EXTRACTION HELPERS
# ============================================
def detect_listing_type(text: str) -> Optional[str]:
    """Auto-detect listing type"""
    text_lower = text.lower()
    if any(w in text_lower for w in ["short stay", "airbnb", "nightly", "daily", "weekly"]):
        return "short-stay"
    elif any(w in text_lower for w in ["sale", "sell", "selling", "for sale", "vendre"]):
        return "sale"
    elif any(w in text_lower for w in ["roommate", "sharing", "flatmate", "colocataire"]):
        return "roommate"
    else:
        return "rent"


def detect_price_type(text: str) -> Optional[str]:
    """Auto-detect price type"""
    text_lower = text.lower()
    if any(w in text_lower for w in ["nightly", "night", "daily", "day", "par nuit"]):
        return "nightly"
    elif any(w in text_lower for w in ["yearly", "year", "annually", "par an"]):
        return "yearly"
    else:
        return "monthly"


def extract_number(text: str) -> Optional[float]:
    """Extract first number from text"""
    cleaned = re.sub(r'[^\d.,-]', '', text)
    match = re.search(r'[\d,.]+', cleaned)
    if match:
        num_str = match.group().replace(',', '')
        try:
            return float(num_str)
        except:
            return None
    return None


def extract_location(text: str) -> Optional[str]:
    """Extract location (city name)"""
    cities = {
        "lagos": "lagos", "cotonou": "cotonou", "calavi": "calavi",
        "paris": "paris", "london": "london", "lyon": "lyon",
        "marseille": "marseille", "nairobi": "nairobi", "accra": "accra",
        "johannesburg": "johannesburg", "kinshasa": "kinshasa", "dakar": "dakar",
        "kampala": "kampala", "cape town": "cape town", "madrid": "madrid",
        "barcelona": "barcelona", "lisbon": "lisbon",
    }

    text_lower = text.lower()
    for city_key, city_val in cities.items():
        if city_key in text_lower:
            return city_val
    return None


def extract_amenities(text: str) -> List[str]:
    """Extract amenities from text"""
    amenities_list = [
        "wifi", "parking", "furnished", "washing machine", "dryer",
        "balcony", "pool", "gym", "garden", "air conditioning", "kitchen",
        "ac", "washer", "elevator", "security", "laundry", "heating", "hot water"
    ]

    found_amenities = []
    text_lower = text.lower()

    for amenity in amenities_list:
        if amenity in text_lower:
            if amenity == "ac":
                found_amenities.append("air conditioning")
            elif amenity == "washer":
                found_amenities.append("washing machine")
            else:
                found_amenities.append(amenity)

    return list(set(found_amenities))


# ============================================
# REQUIRED FIELDS CHECK
# ============================================
def get_missing_fields(state: Dict) -> List[str]:
    """Check which required fields are missing"""
    required = ["location", "bedrooms", "bathrooms", "price", "listing_type", "price_type"]
    missing = []

    for field in required:
        value = state.get(field)
        if value is None or (isinstance(value, str) and value.strip() == ""):
            missing.append(field)

    return missing


def get_next_question_en(missing_fields: List[str]) -> Optional[str]:
    """Get next question in English"""
    if not missing_fields:
        return None

    questions = {
        "location": "What city/area is the property in?",
        "bedrooms": "How many bedrooms does it have?",
        "bathrooms": "How many bathrooms?",
        "price": "What's the price?",
        "listing_type": "Is it for rent, short-stay, sale, or roommate?",
        "price_type": "Is that monthly, nightly, or yearly?",
    }

    return questions.get(missing_fields[0], "Tell me more about the property")


# ============================================
# MAIN INTENT NODE
# ============================================
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
async def intent_node(state: Dict) -> Dict:
    """Main intent detection & extraction node"""

    user_id = state.get("user_id", "unknown")
    human_msg = state["messages"][-1]["content"]

    logger.info(f"π€ Intent Node", user_id=user_id, msg=human_msg[:80])

    # ===== DETECT LANGUAGE =====
    if "user_language" not in state:
        state["user_language"] = await detect_language_with_llm(human_msg)

    language = state["user_language"]
    logger.info(f"π Language: {language}")

    # ===== DETERMINE INTENT =====
    last_msg_lower = human_msg.lower().strip()

    if any(k in last_msg_lower for k in ["list", "publish", "create", "post", "sell", "rent out", "louer", "vendre"]):
        intent = "list"
    elif any(k in last_msg_lower for k in ["search", "find", "show", "look", "browse", "apartments", "houses"]):
        intent = "search"
    elif any(k in last_msg_lower for k in ["my listings", "my properties", "my apartments", "mes annonces"]):
        intent = "my_listings"
    else:
        intent = "greeting"

    logger.info(f"π Intent: {intent}")

    # ===== LIST INTENT =====
    if intent == "list":
        state["intent"] = "list"
        state["allowed"] = True

        # First time - show example
        is_first_list = not state.get("location") and not state.get("bedrooms")

        if is_first_list:
            example_msg = await generate_listing_example(language)
            state["status"] = "collecting"
            state["ai_reply"] = example_msg
            logger.info("π Showing example")
            return state

        # Extract fields using ML
        if ML_AVAILABLE and ml_extractor:
            try:
                location = extract_location(human_msg)
                if location:
                    state["location"] = location

                bedrooms_match = re.search(r'(\d+)\s*(?:bed|bedroom|chambre)', human_msg.lower())
                if bedrooms_match:
                    state["bedrooms"] = int(bedrooms_match.group(1))

                bathrooms_match = re.search(r'(\d+)\s*(?:bath|bathroom|salle de bain)', human_msg.lower())
                if bathrooms_match:
                    state["bathrooms"] = int(bathrooms_match.group(1))

                price = extract_number(human_msg)
                if price and price > 0:
                    state["price"] = float(price)

                state["listing_type"] = detect_listing_type(human_msg)
                state["price_type"] = detect_price_type(human_msg)

                amenities = extract_amenities(human_msg)
                if amenities:
                    state["amenities"] = list(set(state.get("amenities", []) + amenities))

                if any(w in human_msg.lower() for w in ["require", "deposit", "condition", "need"]):
                    state["requirements"] = human_msg

                # Infer currency
                if state.get("location") and not state.get("currency"):
                    try:
                        currency, country, city, conf = await ml_extractor.infer_currency(state)
                        if currency:
                            state["currency"] = currency
                    except:
                        state["currency"] = "XOF"

            except Exception as e:
                logger.warning(f"β οΈ ML extraction error: {e}")
                location = extract_location(human_msg)
                if location:
                    state["location"] = location
                bedrooms = extract_number(human_msg)
                if bedrooms and bedrooms <= 20:
                    state["bedrooms"] = int(bedrooms)
                price = extract_number(human_msg)
                if price and price > 0:
                    state["price"] = float(price)
                state["listing_type"] = detect_listing_type(human_msg)
                state["price_type"] = detect_price_type(human_msg)
                amenities = extract_amenities(human_msg)
                if amenities:
                    state["amenities"] = list(set(state.get("amenities", []) + amenities))
                state["currency"] = "XOF"
        else:
            location = extract_location(human_msg)
            if location:
                state["location"] = location
            bedrooms = extract_number(human_msg)
            if bedrooms and bedrooms <= 20:
                state["bedrooms"] = int(bedrooms)
            price = extract_number(human_msg)
            if price and price > 0:
                state["price"] = float(price)
            state["listing_type"] = detect_listing_type(human_msg)
            state["price_type"] = detect_price_type(human_msg)
            amenities = extract_amenities(human_msg)
            if amenities:
                state["amenities"] = list(set(state.get("amenities", []) + amenities))
            state["currency"] = "XOF"

        # Check missing fields
        missing = get_missing_fields(state)
        state["missing_fields"] = missing

        if missing:
            state["status"] = "collecting"
            question_en = get_next_question_en(missing)
            state["next_question"] = question_en
            state["ai_reply"] = await translate_to_language(question_en, language)
        else:
            state["status"] = "checking_optional"
            optional_q = "Perfect! Any amenities or special requirements?"
            state["ai_reply"] = await translate_to_language(optional_q, language)

        logger.info(f"π Listing: status={state['status']}, missing={missing}")

    # ===== SEARCH INTENT =====
    elif intent == "search":
        state["intent"] = "search"
        state["status"] = "searching"
        state["search_query"] = human_msg
        state["allowed"] = True

        location = extract_location(human_msg)
        if location:
            state["location"] = location

        bedrooms = extract_number(human_msg)
        if bedrooms:
            state["bedrooms"] = int(bedrooms)

        amenities = extract_amenities(human_msg)
        if amenities:
            state["amenities"] = amenities

        prices = re.findall(r'(\d+)[k,.]?', human_msg.lower())
        if prices:
            if len(prices) >= 2:
                state["min_price"] = float(prices[0]) * (1000 if 'k' in human_msg.lower() else 1)
                state["max_price"] = float(prices[1]) * (1000 if 'k' in human_msg.lower() else 1)
            else:
                state["max_price"] = float(prices[0]) * (1000 if 'k' in human_msg.lower() else 1)

        searching_msg = "Searching for properties..."
        state["ai_reply"] = await translate_to_language(searching_msg, language)
        logger.info(f"π Search initiated")

    # ===== MY LISTINGS INTENT =====
    elif intent == "my_listings":
        state["intent"] = "my_listings"
        state["status"] = "fetching_listings"
        state["allowed"] = True

        fetching_msg = "Fetching your listings..."
        state["ai_reply"] = await translate_to_language(fetching_msg, language)
        logger.info(f"π My listings requested")

    # ===== GREETING =====
    else:
        state["intent"] = "greeting"
        state["status"] = "greeting"
        state["allowed"] = True

        greeting_msg = "Hello! π I'm Aida, Lojiz's real estate AI assistant. I can help you list a property, search for homes, or chat about real estate. What would you like to do?"
        state["ai_reply"] = await translate_to_language(greeting_msg, language)
        logger.info(f"π Greeting in {language}")

    logger.info(f"β Intent node complete", intent=state.get("intent"), status=state.get("status"))
    return state

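The removed search branch parsed budget ranges with a plain regex plus a single 'k' multiplier. A standalone sketch of that behaviour (sample query is hypothetical):

import re

def parse_budget(msg: str):
    # Mirrors the removed intent_node logic: digits are captured without any
    # 'k' suffix, and one 'k' anywhere in the message scales every number.
    # Note: any digit in the message (e.g. a bedroom count) is also captured.
    prices = re.findall(r'(\d+)[k,.]?', msg.lower())
    mult = 1000 if 'k' in msg.lower() else 1
    if not prices:
        return None, None
    if len(prices) >= 2:
        return float(prices[0]) * mult, float(prices[1]) * mult
    return None, float(prices[0]) * mult

print(parse_budget("apartments in Lagos between 50k and 80k"))  # -> (50000.0, 80000.0)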
app/ai/nodes/publish_node.py
DELETED
@@ -1,53 +0,0 @@
# app/ai/nodes/publish_node.py β FIXED STATUS CHECK
import datetime
from typing import Dict
from app.database import get_db
from structlog import get_logger
from bson import ObjectId

logger = get_logger(__name__)

_PUBLISH_WORDS = {"publish", "publier", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}

async def publish_node(state: Dict) -> Dict:
    # ACCEPT BOTH STATUSES
    if state.get("status") not in {"preview_shown", "publishing"} or not state.get("draft_preview"):
        return state

    draft = state["draft_preview"]
    try:
        db = await get_db()
        col = db["listings"]
        doc = {
            "owner_id": state["user_id"],
            "type": draft["listing_type"],
            "title": draft["title"],
            "description": draft["description"],
            "price": draft["price"],
            "price_type": draft["price_type"],
            "bedrooms": draft["bedrooms"],
            "bathrooms": draft["bathrooms"],
            "location": draft["location"],
            "amenities": draft.get("amenities", []),
            "requirements": draft.get("requirements"),
            "currency": draft.get("currency", "XOF"),
            "images": draft.get("images", []),
            "status": "published",
            "created_at": datetime.datetime.utcnow(),
            "updated_at": datetime.datetime.utcnow(),
            "views": 0,
            "favorites": 0,
        }
        mongo_id = str((await col.insert_one(doc)).inserted_id)

        state["mongo_id"] = mongo_id
        state["status"] = "published"
        state["ai_reply"] = f"β **Listing published!** \n{draft['title']} is now live. π"
        logger.info("β Listing published", mongo_id=mongo_id, title=draft["title"])

    except Exception as e:
        logger.error("β Publish failed", exc_info=e)
        state["ai_reply"] = "Sorry, publishing failed. Please try again."
        state["status"] = "error"

    return state

|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
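For reference, a minimal sketch (not part of the commit) of the state this removed node expected; the draft values are invented and a configured MongoDB connection is assumed:

import asyncio
from app.ai.nodes.publish_node import publish_node  # path as it existed before this commit

state = {
    "user_id": "user-123",          # hypothetical id
    "status": "preview_shown",      # the node also accepted "publishing"
    "draft_preview": {
        "listing_type": "rent", "title": "2-bed in Cotonou", "description": "Bright flat",
        "price": 150000, "price_type": "monthly", "bedrooms": 2, "bathrooms": 1,
        "location": "Cotonou", "amenities": ["wifi"], "requirements": None,
        "currency": "XOF", "images": [],
    },
}
# After the call, state["status"] is "published" and state["mongo_id"] holds the new listing id.
state = asyncio.run(publish_node(state))
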
app/ai/nodes/role_gate_node.py
DELETED
@@ -1,51 +0,0 @@
# app/ai/nodes/role_gate_node.py (final)
from typing import Dict
from structlog import get_logger

logger = get_logger(__name__)

LANDLORD_ALLOWED = {"rent", "short_stay", "sale"}
RENTER_ALLOWED = {"roommate"}

def role_gate_node(state: Dict) -> Dict:
    """
    LangGraph node:
    - input: state with allowed, listing_type, user_role, intent
    - output: same state (may flip allowed + set ai_reply)
    """
    # 1. search intent -> skip role check entirely
    if state.get("intent") == "search":
        logger.info("Aida passed search intent")
        return state

    # 2. already blocked by intent node? keep message
    if not state.get("allowed", False):
        logger.info("Aida already denied", reason="intent")
        return state

    # 3. casual chat (no listing_type) -> leave reply untouched
    list_type = state.get("listing_type")
    if list_type is None:
        logger.info("Aida passed casual chat")
        return state

    # 4. real-estate listing -> role check
    allowed_set = LANDLORD_ALLOWED if state["user_role"] == "landlord" else RENTER_ALLOWED
    if list_type not in allowed_set:
        state["allowed"] = False
        if state["user_role"] == "landlord":
            state["ai_reply"] = (
                "As a landlord you can only list for rent, short-stay or sale. "
                "Would you like to list this as a rental instead?"
            )
        else:
            state["ai_reply"] = (
                "As a renter you can only list for roommate matching. "
                "If you want to share your own space, great! Otherwise you can search listings."
            )
        logger.info("Aida denied by role gate", user_role=state["user_role"], type=list_type)
    else:
        state["ai_reply"] = state.get("ai_reply") or "Got it! Let me draft that for you."
        logger.info("Aida approved", user_role=state["user_role"], type=list_type)

    return state

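As an illustration of the removed gate (not part of the commit), a renter trying to post a plain rental was flipped to denied; the intent value here is just a placeholder for any non-search intent:

state = {"intent": "list", "allowed": True, "user_role": "renter", "listing_type": "rent"}
state = role_gate_node(state)
print(state["allowed"])   # False - "rent" is not in RENTER_ALLOWED
print(state["ai_reply"])  # "As a renter you can only list for roommate matching. ..."
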
app/ai/nodes/search_node.py
DELETED
@@ -1,355 +0,0 @@
# app/ai/nodes/search_node.py - Return UI-ready search results
import json
import httpx
from typing import Dict, List
from qdrant_client import AsyncQdrantClient, models
from app.config import settings
from structlog import get_logger

logger = get_logger(__name__)

EMBED_MODEL = "qwen/qwen3-embedding-8b"
TOP_K = 6

# ------------------------------------------------------------------
# Qdrant client
# ------------------------------------------------------------------
qdrant_client = AsyncQdrantClient(
    url=settings.QDRANT_URL,
    api_key=settings.QDRANT_API_KEY,
    https=True,
    timeout=60,
)

# ---------- helpers ----------
def _build_filter(state: Dict) -> models.Filter:
    """Build comprehensive Qdrant filter from ALL search fields."""
    must = []

    # Location filter (case-insensitive)
    loc = (state.get("location") or "").lower()
    if loc:
        must.append(
            models.FieldCondition(
                key="location_lower",
                match=models.MatchValue(value=loc)
            )
        )

    # Price range filters (combine into single condition)
    if state.get("min_price") is not None or state.get("max_price") is not None:
        price_range = {}
        if state.get("min_price") is not None:
            price_range["gte"] = state["min_price"]
        if state.get("max_price") is not None:
            price_range["lte"] = state["max_price"]

        if price_range:
            must.append(
                models.FieldCondition(
                    key="price",
                    range=models.Range(**price_range)
                )
            )

    # Bedrooms filter
    if state.get("bedrooms") is not None:
        must.append(
            models.FieldCondition(
                key="bedrooms",
                match=models.MatchValue(value=state["bedrooms"])
            )
        )

    # Bathrooms filter
    if state.get("bathrooms") is not None:
        must.append(
            models.FieldCondition(
                key="bathrooms",
                match=models.MatchValue(value=state["bathrooms"])
            )
        )

    # Price type filter (monthly, nightly, yearly, etc.)
    price_type = (state.get("price_type") or "").lower()
    if price_type:
        must.append(
            models.FieldCondition(
                key="price_type_lower",
                match=models.MatchValue(value=price_type)
            )
        )

    # Listing type filter (rent, short_stay, roommate, sale)
    listing_type = (state.get("listing_type") or "").lower()
    if listing_type:
        must.append(
            models.FieldCondition(
                key="listing_type_lower",
                match=models.MatchValue(value=listing_type)
            )
        )

    # Amenities filter (all mentioned amenities must exist in listing)
    amenities = state.get("amenities", [])
    if amenities:
        for amenity in amenities:
            amenity_lower = amenity.lower().strip()
            if amenity_lower:
                must.append(
                    models.FieldCondition(
                        key="amenities",
                        match=models.MatchValue(value=amenity_lower)
                    )
                )

    filt = models.Filter(must=must) if must else models.Filter()
    logger.info("🔍 Filter built", must_conditions=len(must), location=loc,
                min_price=state.get("min_price"), max_price=state.get("max_price"),
                bedrooms=state.get("bedrooms"), bathrooms=state.get("bathrooms"),
                amenities=amenities, price_type=price_type, listing_type=listing_type)
    return filt


async def _embed(text: str) -> List[float]:
    """Call OpenRouter embedding endpoint (async)."""
    payload = {
        "model": EMBED_MODEL,
        "input": text,
        "encoding_format": "float",
    }
    headers = {
        "Authorization": f"Bearer {settings.OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
        "HTTP-Referer": "",
        "X-Title": "",
    }
    async with httpx.AsyncClient(timeout=60) as client:
        resp = await client.post(
            "https://openrouter.ai/api/v1/embeddings",
            headers=headers,
            json=payload,
        )
        resp.raise_for_status()
        return resp.json()["data"][0]["embedding"]


# ---------- suggestion helpers ----------
async def _search_with_must(must: List, vector: List[float]):
    return await qdrant_client.search(
        collection_name="listings",
        query_vector=vector,
        query_filter=models.Filter(must=must),
        limit=TOP_K,
        with_payload=True,
    )


def _add_price_range(must: List, state: Dict):
    """Add combined price range filter."""
    if state.get("min_price") is not None or state.get("max_price") is not None:
        price_range = {}
        if state.get("min_price") is not None:
            price_range["gte"] = state["min_price"]
        if state.get("max_price") is not None:
            price_range["lte"] = state["max_price"]

        if price_range:
            must.append(models.FieldCondition(key="price", range=models.Range(**price_range)))


def _hits_to_cards(hits):
    """Convert Qdrant hits to UI cards."""
    return [
        {
            "id": hit.id,
            "title": hit.payload.get("title") or f"{hit.payload.get('bedrooms', '')}-bed {hit.payload.get('location', '')}",
            "location": hit.payload.get("location"),
            "price": hit.payload.get("price"),
            "price_type": hit.payload.get("price_type"),
            "bedrooms": hit.payload.get("bedrooms"),
            "bathrooms": hit.payload.get("bathrooms"),
            "amenities": hit.payload.get("amenities", []),
            "description": hit.payload.get("description"),
            "listing_type": hit.payload.get("listing_type"),
            "images": hit.payload.get("images", []),  # ✅ Include images for UI
            "currency": hit.payload.get("currency", "XOF"),
        }
        for hit in hits
    ]


async def _suggest_relaxed(state: Dict, vector: List[float]) -> List[dict]:
    """
    Loosen constraints progressively while keeping location strict.
    If no location specified, return empty.
    """
    loc = (state.get("location") or "").lower()
    br = state.get("bedrooms")
    amenities = state.get("amenities", [])

    # If no location specified, return empty
    if not loc:
        return []

    # Location is ALWAYS a hard constraint
    location_filter = models.FieldCondition(
        key="location_lower",
        match=models.MatchValue(value=loc)
    )

    # 1. Try with all filters intact
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    _add_price_range(must, state)
    for amenity in amenities:
        must.append(models.FieldCondition(key="amenities", match=models.MatchValue(value=amenity.lower())))
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 2. Loosen amenities (remove optional ones)
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 3. Loosen bedrooms +/-1, keep location strict
    if br is not None:
        must = [location_filter]
        new_br = br - 1 if br > 1 else br + 1
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=new_br)))
        _add_price_range(must, state)
        hits = await _search_with_must(must, vector)
        if hits:
            return _hits_to_cards(hits)

    # 4. Loosen price +25%, keep location strict
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    if state.get("max_price") is not None:
        relaxed_max = int(state["max_price"] * 1.25)
        must.append(models.FieldCondition(key="price", range=models.Range(lte=relaxed_max)))
    else:
        _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    return []


# ---------- node ----------
async def search_node(state: Dict) -> Dict:
    """
    LangGraph node: comprehensive search with all filters
    - title/description (semantic via embedding)
    - location, price range, bedrooms, bathrooms, amenities (keyword filters)
    - price_type, listing_type

    ✅ RETURNS: search_results + search_preview (UI-ready)
    """
    query = state.get("search_query", "") or state["messages"][-1]["content"]
    vector = await _embed(query)
    filt = _build_filter(state)

    logger.info("🔍 Searching Qdrant", query=query, filter=str(filt))

    hits = await qdrant_client.search(
        collection_name="listings",
        query_vector=vector,
        query_filter=filt,
        limit=TOP_K,
        with_payload=True,
    )

    logger.info("🔍 Qdrant search result", hits_count=len(hits))
    cards = _hits_to_cards(hits)

    # --- personalize zero-hit reply + suggestions + UI preview
    if not cards:
        location = state.get("location") or "that area"
        bedrooms = state.get("bedrooms")
        price_bit = (
            " in your price range"
            if state.get("min_price") is not None or state.get("max_price") is not None
            else ""
        )
        br_bit = f" with {bedrooms} bedrooms" if bedrooms else ""
        amenities_bit = f" with {', '.join(state.get('amenities', []))}" if state.get("amenities") else ""

        suggestions = await _suggest_relaxed(state, vector)
        if suggestions:
            state["ai_reply"] = (
                f"I found no exact match for your request, "
                f"but you might like these similar options:"
            )
            state["search_preview"] = {  # ✅ NEW: UI-ready preview for suggestions
                "type": "search_results",
                "count": len(suggestions),
                "query": query,
                "filters": {
                    "location": state.get("location"),
                    "min_price": state.get("min_price"),
                    "max_price": state.get("max_price"),
                    "bedrooms": state.get("bedrooms"),
                    "bathrooms": state.get("bathrooms"),
                    "price_type": state.get("price_type"),
                    "listing_type": state.get("listing_type"),
                    "amenities": state.get("amenities", []),
                },
                "results": suggestions,
                "message": "Similar options available"
            }
            state["search_results"] = suggestions
        else:
            state["ai_reply"] = (
                f"I found no property in {location}{price_bit}{br_bit}{amenities_bit}. "
                "Try widening your search or check back later!"
            )
            state["search_preview"] = {  # ✅ Empty UI state
                "type": "search_results",
                "count": 0,
                "query": query,
                "filters": {
                    "location": state.get("location"),
                    "min_price": state.get("min_price"),
                    "max_price": state.get("max_price"),
                    "bedrooms": state.get("bedrooms"),
                    "bathrooms": state.get("bathrooms"),
                    "price_type": state.get("price_type"),
                    "listing_type": state.get("listing_type"),
                    "amenities": state.get("amenities", []),
                },
                "results": [],
                "message": "No results found"
            }
    else:
        state["ai_reply"] = f"Here are {len(cards)} places I found for you:"
        # ✅ NEW: Return UI-ready search preview
        state["search_preview"] = {
            "type": "search_results",
            "count": len(cards),
            "query": query,
            "filters": {
                "location": state.get("location"),
                "min_price": state.get("min_price"),
                "max_price": state.get("max_price"),
                "bedrooms": state.get("bedrooms"),
                "bathrooms": state.get("bathrooms"),
                "price_type": state.get("price_type"),
                "listing_type": state.get("listing_type"),
                "amenities": state.get("amenities", []),
            },
            "results": cards,
            "message": f"Found {len(cards)} listings"
        }
        state["search_results"] = cards

    logger.info("Aida search finished", query=query, count=len(cards), suggestions=len(state.get("suggestions", [])))
    return state

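To make the removed filter logic concrete, here is an illustrative sketch (example values, not part of the commit) of what _build_filter produced for a typical search state:

state = {
    "location": "Cotonou", "min_price": 100000, "max_price": 250000,
    "bedrooms": 2, "amenities": ["wifi", "parking"], "listing_type": "rent",
}
filt = _build_filter(state)
# Roughly equivalent to:
# models.Filter(must=[
#     FieldCondition(key="location_lower", match=MatchValue(value="cotonou")),
#     FieldCondition(key="price", range=Range(gte=100000, lte=250000)),
#     FieldCondition(key="bedrooms", match=MatchValue(value=2)),
#     FieldCondition(key="listing_type_lower", match=MatchValue(value="rent")),
#     FieldCondition(key="amenities", match=MatchValue(value="wifi")),
#     FieldCondition(key="amenities", match=MatchValue(value="parking")),
# ])
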
app/ai/routes/chat.py
CHANGED
|
@@ -1,4 +1,13 @@
|
|
| 1 |
-
# app/routes/chat.py -
|
|
|
|
|
|
|
| 2 |
from fastapi import APIRouter, Depends, HTTPException
|
| 3 |
from fastapi.security import HTTPBearer
|
| 4 |
from pydantic import BaseModel
|
|
@@ -6,107 +15,469 @@ from typing import Optional, List, Dict, Any
|
|
| 6 |
from structlog import get_logger
|
| 7 |
|
| 8 |
from app.guards.jwt_guard import decode_access_token
|
| 9 |
-
from app.ai.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
logger = get_logger(__name__)
|
| 12 |
|
| 13 |
router = APIRouter()
|
| 14 |
security = HTTPBearer()
|
|
|
|
|
|
| 15 |
|
| 16 |
|
| 17 |
-
# ============================================
|
| 18 |
-
# REQUEST/RESPONSE MODELS
|
| 19 |
-
# ============================================
|
| 20 |
class AskBody(BaseModel):
|
| 21 |
-
"""Chat request body"""
|
| 22 |
message: str
|
|
|
|
| 23 |
thread_id: Optional[str] = None
|
|
|
|
| 24 |
|
| 25 |
|
| 26 |
-
class
|
| 27 |
-
|
| 28 |
-
text: str
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
missing_fields: Optional[List[str]] = None # Fields still needed
|
| 34 |
-
next_question: Optional[str] = None # Next question to ask
|
| 35 |
-
error: Optional[str] = None # Error message if any
|
| 36 |
|
| 37 |
|
| 38 |
-
#
|
| 39 |
-
|
| 40 |
-
# ============================================
|
| 41 |
-
@router.post("/ask", response_model=AskResponse)
|
| 42 |
async def ask_ai(
|
| 43 |
body: AskBody,
|
| 44 |
token: str = Depends(security),
|
| 45 |
-
):
|
| 46 |
"""
|
| 47 |
-
Main chat endpoint
|
| 48 |
|
| 49 |
Flow:
|
| 50 |
-
1. Authenticate user
|
| 51 |
-
2.
|
| 52 |
-
3.
|
| 53 |
-
4.
|
|
|
|
|
|
|
|
|
|
| 54 |
"""
|
| 55 |
|
| 56 |
-
logger.info(f"π¨ /ask endpoint called", msg=body.message[:50])
|
| 57 |
-
|
| 58 |
-
# ===== AUTHENTICATE =====
|
| 59 |
try:
|
|
|
|
| 60 |
payload = decode_access_token(token.credentials)
|
| 61 |
if not payload:
|
|
|
|
| 62 |
raise HTTPException(status_code=401, detail="Invalid token")
|
| 63 |
|
| 64 |
-
user_id = payload
|
| 65 |
-
user_role = payload.get("role", "
|
| 66 |
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
try:
|
| 82 |
-
result = await aida_chat_sync(
|
| 83 |
user_id=user_id,
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
)
|
| 87 |
|
| 88 |
-
logger.info(
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
status=result.get("status"),
|
| 94 |
-
draft_preview=result.get("draft_preview"),
|
| 95 |
-
search_results=result.get("search_results"),
|
| 96 |
-
missing_fields=result.get("missing_fields"),
|
| 97 |
-
next_question=result.get("next_question"),
|
| 98 |
-
error=result.get("error"),
|
| 99 |
)
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
|
|
|
|
|
|
| 101 |
except Exception as e:
|
| 102 |
-
logger.error(
|
| 103 |
raise HTTPException(
|
| 104 |
status_code=500,
|
| 105 |
-
detail=f"
|
| 106 |
)
|
| 107 |
|
| 108 |
|
|
|
|
| 109 |
@router.get("/health")
|
| 110 |
-
async def
|
| 111 |
-
"""Health check"""
|
| 112 |
-
return {
|
|
|
|
|
|
| 1 |
+
# app/ai/routes/chat.py - Integrated Chat Endpoint
|
| 2 |
+
"""
|
| 3 |
+
Complete chat endpoint using all modules:
|
| 4 |
+
- Intent detection
|
| 5 |
+
- Property listing service
|
| 6 |
+
- Image upload
|
| 7 |
+
- Draft & preview
|
| 8 |
+
- Redis memory
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
from fastapi import APIRouter, Depends, HTTPException
|
| 12 |
from fastapi.security import HTTPBearer
|
| 13 |
from pydantic import BaseModel
|
|
|
|
| 15 |
from structlog import get_logger
|
| 16 |
|
| 17 |
from app.guards.jwt_guard import decode_access_token
|
| 18 |
+
from app.ai.memory.redis_context_memory import get_current_memory
|
| 19 |
+
from app.ai.intent_detector import detect_intent_with_llm, build_smart_response
|
| 20 |
+
from app.ai.services.property_listing_service import PropertyListingService
|
| 21 |
+
from app.ai.modules.post_draft_handler import handle_post_draft_action
|
| 22 |
+
from app.ai.memory.redis_memory import is_rate_limited
|
| 23 |
|
| 24 |
logger = get_logger(__name__)
|
| 25 |
|
| 26 |
router = APIRouter()
|
| 27 |
security = HTTPBearer()
|
| 28 |
+
listing_service = PropertyListingService()
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# ========== REQUEST/RESPONSE MODELS ==========
|
| 32 |
+
class MessageHistory(BaseModel):
|
| 33 |
+
role: str
|
| 34 |
+
content: str
|
| 35 |
+
timestamp: Optional[str] = None
|
| 36 |
|
| 37 |
|
|
|
|
|
|
|
|
|
|
| 38 |
class AskBody(BaseModel):
|
|
|
|
| 39 |
message: str
|
| 40 |
+
session_id: Optional[str] = None
|
| 41 |
thread_id: Optional[str] = None
|
| 42 |
+
image_data: Optional[Dict[str, str]] = None # For image uploads: {mime: "image/jpeg", data: "base64..."}
|
| 43 |
|
| 44 |
|
| 45 |
+
class ChatResponse(BaseModel):
|
| 46 |
+
success: bool
|
| 47 |
+
text: str
|
| 48 |
+
action: str
|
| 49 |
+
state: Optional[Dict[str, Any]] = None
|
| 50 |
+
mongo_id: Optional[str] = None
|
| 51 |
+
error: Optional[str] = None
|
|
|
|
|
|
|
|
|
|
| 52 |
|
| 53 |
|
| 54 |
+
# ========== MAIN CHAT ENDPOINT ==========
|
| 55 |
+
@router.post("/ask", response_model=ChatResponse)
|
|
|
|
|
|
|
| 56 |
async def ask_ai(
|
| 57 |
body: AskBody,
|
| 58 |
token: str = Depends(security),
|
| 59 |
+
) -> ChatResponse:
|
| 60 |
"""
|
| 61 |
+
Main chat endpoint - handles all Aida functionality
|
| 62 |
|
| 63 |
Flow:
|
| 64 |
+
1. Authenticate user
|
| 65 |
+
2. Check rate limit
|
| 66 |
+
3. Get Redis memory
|
| 67 |
+
4. Detect intent
|
| 68 |
+
5. Route to appropriate handler
|
| 69 |
+
6. Save to memory
|
| 70 |
+
7. Return response
|
| 71 |
"""
|
| 72 |
|
|
|
|
|
|
|
|
|
|
| 73 |
try:
|
| 74 |
+
# ========== 1. AUTHENTICATE ==========
|
| 75 |
payload = decode_access_token(token.credentials)
|
| 76 |
if not payload:
|
| 77 |
+
logger.warning("π Invalid token")
|
| 78 |
raise HTTPException(status_code=401, detail="Invalid token")
|
| 79 |
|
| 80 |
+
user_id = payload["user_id"]
|
| 81 |
+
user_role = payload.get("role", "renter") # landlord or renter
|
| 82 |
|
| 83 |
+
# ========== 2. RATE LIMIT CHECK ==========
|
| 84 |
+
if await is_rate_limited(user_id):
|
| 85 |
+
logger.warning("β οΈ Rate limit exceeded", user_id=user_id)
|
| 86 |
+
raise HTTPException(status_code=429, detail="Rate limit exceeded")
|
| 87 |
+
|
| 88 |
+
# ========== 3. GET REDIS MEMORY ==========
|
| 89 |
+
session_id = body.session_id or "default"
|
| 90 |
+
memory = await get_current_memory(user_id, session_id)
|
| 91 |
+
|
| 92 |
+
# Get current context from Redis
|
| 93 |
+
context = await memory.get_context()
|
| 94 |
+
|
| 95 |
+
logger.info(
|
| 96 |
+
"π¬ Chat message received",
|
|
|
|
|
|
|
| 97 |
user_id=user_id,
|
| 98 |
+
session_id=session_id,
|
| 99 |
+
status=context.get("status"),
|
| 100 |
+
message_length=len(body.message)
|
| 101 |
+
)
|
| 102 |
+
|
| 103 |
+
# ========== 4. ADD USER MESSAGE TO MEMORY ==========
|
| 104 |
+
await memory.add_message("user", body.message)
|
| 105 |
+
|
| 106 |
+
# ========== 5. DETECT INTENT ==========
|
| 107 |
+
intent, confidence, intent_meta = await detect_intent_with_llm(
|
| 108 |
+
body.message,
|
| 109 |
+
conversation_context=context
|
| 110 |
)
|
| 111 |
|
| 112 |
+
logger.info(
|
| 113 |
+
"π― Intent detected",
|
| 114 |
+
intent=intent,
|
| 115 |
+
confidence=confidence,
|
| 116 |
+
language=intent_meta.get("language")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
)
|
| 118 |
+
|
| 119 |
+
# Update language in context
|
| 120 |
+
if intent_meta.get("language"):
|
| 121 |
+
await memory.update_context({"language": intent_meta["language"]})
|
| 122 |
+
context["language"] = intent_meta["language"]
|
| 123 |
+
|
| 124 |
+
language = context.get("language", "en")
|
| 125 |
+
|
| 126 |
+
# ========== 6. ROUTE TO APPROPRIATE HANDLER ==========
|
| 127 |
+
|
| 128 |
+
# ========== HANDLE IMAGE UPLOAD ==========
|
| 129 |
+
if body.image_data:
|
| 130 |
+
logger.info("πΈ Image upload request")
|
| 131 |
+
|
| 132 |
+
image_base64 = body.image_data.get("data")
|
| 133 |
+
mime_type = body.image_data.get("mime", "image/jpeg")
|
| 134 |
+
|
| 135 |
+
if not image_base64:
|
| 136 |
+
return ChatResponse(
|
| 137 |
+
success=False,
|
| 138 |
+
text="No image data provided",
|
| 139 |
+
action="error",
|
| 140 |
+
error="Invalid image data"
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
# Upload image
|
| 144 |
+
upload_result = await listing_service.upload_image(
|
| 145 |
+
image_base64=image_base64,
|
| 146 |
+
mime_type=mime_type,
|
| 147 |
+
current_state={
|
| 148 |
+
**context,
|
| 149 |
+
"raw_state": context.get("state", {})
|
| 150 |
+
}
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
# Update memory
|
| 154 |
+
await memory.update_context({
|
| 155 |
+
"state": upload_result["state"].get("raw_state", {})
|
| 156 |
+
})
|
| 157 |
+
|
| 158 |
+
# Add response to memory
|
| 159 |
+
response_msg = upload_result["message"]
|
| 160 |
+
await memory.add_message("assistant", response_msg)
|
| 161 |
+
|
| 162 |
+
return ChatResponse(
|
| 163 |
+
success=True,
|
| 164 |
+
text=response_msg,
|
| 165 |
+
action=upload_result["action"],
|
| 166 |
+
state=upload_result["state"]
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
# ========== HANDLE LISTING INTENT ==========
|
| 170 |
+
if intent == "LIST_PROPERTY":
|
| 171 |
+
logger.info("π Listing intent detected")
|
| 172 |
+
|
| 173 |
+
# Check if starting new listing or continuing
|
| 174 |
+
if context.get("status") != "listing":
|
| 175 |
+
# Start new listing
|
| 176 |
+
result = await listing_service.start_listing(
|
| 177 |
+
user_id=user_id,
|
| 178 |
+
user_role=user_role,
|
| 179 |
+
language=language
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
# Update memory
|
| 183 |
+
await memory.update_context({
|
| 184 |
+
"status": "listing",
|
| 185 |
+
"user_role": user_role,
|
| 186 |
+
"language": language,
|
| 187 |
+
})
|
| 188 |
+
|
| 189 |
+
response_text = result["example"]
|
| 190 |
+
await memory.add_message("assistant", response_text)
|
| 191 |
+
|
| 192 |
+
return ChatResponse(
|
| 193 |
+
success=True,
|
| 194 |
+
text=response_text,
|
| 195 |
+
action="show_example",
|
| 196 |
+
state=result["state"]
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
else:
|
| 200 |
+
# Continue listing
|
| 201 |
+
result = await listing_service.continue_listing(
|
| 202 |
+
user_input=body.message,
|
| 203 |
+
current_state={
|
| 204 |
+
**context,
|
| 205 |
+
"raw_state": context.get("state", {})
|
| 206 |
+
}
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
+
# Update memory with new state
|
| 210 |
+
new_state = result["state"]
|
| 211 |
+
await memory.update_context({
|
| 212 |
+
"state": new_state.get("raw_state", {}),
|
| 213 |
+
"status": new_state.get("status", "listing"),
|
| 214 |
+
"draft": new_state.get("draft"),
|
| 215 |
+
})
|
| 216 |
+
|
| 217 |
+
response_text = result.get("question") or result.get("message", "")
|
| 218 |
+
await memory.add_message("assistant", response_text)
|
| 219 |
+
|
| 220 |
+
return ChatResponse(
|
| 221 |
+
success=True,
|
| 222 |
+
text=response_text,
|
| 223 |
+
action=result["action"],
|
| 224 |
+
state=new_state
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
# ========== HANDLE SEARCH INTENT ==========
|
| 228 |
+
elif intent == "SEARCH_PROPERTY":
|
| 229 |
+
logger.info("π Search intent detected")
|
| 230 |
+
|
| 231 |
+
# Import search service (you'll build this)
|
| 232 |
+
# For now, return placeholder
|
| 233 |
+
response_text = "Search feature coming soon! For now, you can list or talk to an agent."
|
| 234 |
+
await memory.add_message("assistant", response_text)
|
| 235 |
+
|
| 236 |
+
return ChatResponse(
|
| 237 |
+
success=True,
|
| 238 |
+
text=response_text,
|
| 239 |
+
action="search",
|
| 240 |
+
state=context
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
# ========== HANDLE POST-DRAFT ACTIONS ==========
|
| 244 |
+
elif intent == "PUBLISH_DRAFT" or intent == "EDIT_FIELD" or intent == "DISCARD_DRAFT":
|
| 245 |
+
logger.info(f"π Post-draft action: {intent}")
|
| 246 |
+
|
| 247 |
+
if context.get("status") not in ["preview_shown", "waiting_for_images"]:
|
| 248 |
+
response_text = "You don't have a draft to publish. Would you like to list a property?"
|
| 249 |
+
await memory.add_message("assistant", response_text)
|
| 250 |
+
|
| 251 |
+
return ChatResponse(
|
| 252 |
+
success=True,
|
| 253 |
+
text=response_text,
|
| 254 |
+
action="error",
|
| 255 |
+
state=context
|
| 256 |
+
)
|
| 257 |
+
|
| 258 |
+
draft = context.get("draft")
|
| 259 |
+
if not draft:
|
| 260 |
+
response_text = "Draft not found. Please start over."
|
| 261 |
+
await memory.add_message("assistant", response_text)
|
| 262 |
+
|
| 263 |
+
return ChatResponse(
|
| 264 |
+
success=True,
|
| 265 |
+
text=response_text,
|
| 266 |
+
action="error",
|
| 267 |
+
state=context
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
+
# Handle the action
|
| 271 |
+
result = await handle_post_draft_action(
|
| 272 |
+
user_input=body.message,
|
| 273 |
+
draft=draft,
|
| 274 |
+
user_id=user_id,
|
| 275 |
+
language=language
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
response_text = result["message"]
|
| 279 |
+
|
| 280 |
+
# ========== PUBLISHED ==========
|
| 281 |
+
if result["action"] == "published":
|
| 282 |
+
logger.info("β
Listing published", mongo_id=result["mongo_id"])
|
| 283 |
+
|
| 284 |
+
# Clear draft from memory
|
| 285 |
+
await memory.update_context({
|
| 286 |
+
"status": "idle",
|
| 287 |
+
"draft": None,
|
| 288 |
+
"state": {},
|
| 289 |
+
})
|
| 290 |
+
|
| 291 |
+
await memory.add_message("assistant", response_text)
|
| 292 |
+
|
| 293 |
+
return ChatResponse(
|
| 294 |
+
success=True,
|
| 295 |
+
text=response_text,
|
| 296 |
+
action="published",
|
| 297 |
+
mongo_id=result["mongo_id"],
|
| 298 |
+
state={**context, "status": "idle"}
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
# ========== PUBLISH ERROR ==========
|
| 302 |
+
elif result["action"] == "publish_error":
|
| 303 |
+
logger.error("β Publish failed")
|
| 304 |
+
|
| 305 |
+
await memory.add_message("assistant", response_text)
|
| 306 |
+
|
| 307 |
+
return ChatResponse(
|
| 308 |
+
success=False,
|
| 309 |
+
text=response_text,
|
| 310 |
+
action="publish_error",
|
| 311 |
+
error=response_text,
|
| 312 |
+
state=context
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
# ========== SHOW UPDATED PREVIEW ==========
|
| 316 |
+
elif result["action"] == "show_updated_preview":
|
| 317 |
+
logger.info("βοΈ Draft field updated")
|
| 318 |
+
|
| 319 |
+
# Update memory with new draft
|
| 320 |
+
await memory.update_context({
|
| 321 |
+
"draft": result["draft"]
|
| 322 |
+
})
|
| 323 |
+
|
| 324 |
+
await memory.add_message("assistant", response_text)
|
| 325 |
+
|
| 326 |
+
return ChatResponse(
|
| 327 |
+
success=True,
|
| 328 |
+
text=response_text,
|
| 329 |
+
action="show_updated_preview",
|
| 330 |
+
state=context
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
# ========== DISCARDED ==========
|
| 334 |
+
elif result["action"] == "discarded":
|
| 335 |
+
logger.info("β Draft discarded")
|
| 336 |
+
|
| 337 |
+
await memory.update_context(result["next_state"])
|
| 338 |
+
await memory.add_message("assistant", response_text)
|
| 339 |
+
|
| 340 |
+
return ChatResponse(
|
| 341 |
+
success=True,
|
| 342 |
+
text=response_text,
|
| 343 |
+
action="discarded",
|
| 344 |
+
state=result["next_state"]
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
+
# ========== ADD IMAGE ==========
|
| 348 |
+
elif result["action"] == "add_image":
|
| 349 |
+
logger.info("πΈ User wants to add image")
|
| 350 |
+
|
| 351 |
+
await memory.add_message("assistant", response_text)
|
| 352 |
+
|
| 353 |
+
return ChatResponse(
|
| 354 |
+
success=True,
|
| 355 |
+
text=response_text,
|
| 356 |
+
action="add_image",
|
| 357 |
+
state=context
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
# ========== UNCLEAR ==========
|
| 361 |
+
else:
|
| 362 |
+
await memory.add_message("assistant", response_text)
|
| 363 |
+
|
| 364 |
+
return ChatResponse(
|
| 365 |
+
success=True,
|
| 366 |
+
text=response_text,
|
| 367 |
+
action="unclear",
|
| 368 |
+
state=context
|
| 369 |
+
)
|
| 370 |
+
|
| 371 |
+
# ========== HANDLE CASUAL CHAT / OTHER ==========
|
| 372 |
+
else:
|
| 373 |
+
logger.info("π¬ Casual chat", intent=intent)
|
| 374 |
+
|
| 375 |
+
response_text = await build_smart_response(
|
| 376 |
+
intent=intent,
|
| 377 |
+
confidence=confidence,
|
| 378 |
+
conversation_context=context,
|
| 379 |
+
language=language
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
await memory.add_message("assistant", response_text)
|
| 383 |
+
|
| 384 |
+
return ChatResponse(
|
| 385 |
+
success=True,
|
| 386 |
+
text=response_text,
|
| 387 |
+
action=intent,
|
| 388 |
+
state=context
|
| 389 |
+
)
|
| 390 |
|
| 391 |
+
except HTTPException:
|
| 392 |
+
raise
|
| 393 |
except Exception as e:
|
| 394 |
+
logger.error("β Chat endpoint error", exc_info=e)
|
| 395 |
raise HTTPException(
|
| 396 |
status_code=500,
|
| 397 |
+
detail=f"Error processing message: {str(e)}"
|
| 398 |
)
|
| 399 |
|
| 400 |
|
| 401 |
+
# ========== HEALTH CHECK ==========
|
| 402 |
@router.get("/health")
|
| 403 |
+
async def health_check():
|
| 404 |
+
"""Health check for chat service"""
|
| 405 |
+
return {
|
| 406 |
+
"status": "healthy",
|
| 407 |
+
"service": "Aida Chat",
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
# ========== GET CHAT HISTORY ==========
|
| 412 |
+
@router.get("/history/{session_id}")
|
| 413 |
+
async def get_chat_history(
|
| 414 |
+
session_id: str,
|
| 415 |
+
token: str = Depends(security),
|
| 416 |
+
):
|
| 417 |
+
"""
|
| 418 |
+
Get chat history for a session
|
| 419 |
+
"""
|
| 420 |
+
|
| 421 |
+
try:
|
| 422 |
+
payload = decode_access_token(token.credentials)
|
| 423 |
+
if not payload:
|
| 424 |
+
raise HTTPException(status_code=401, detail="Invalid token")
|
| 425 |
+
|
| 426 |
+
user_id = payload["user_id"]
|
| 427 |
+
|
| 428 |
+
# Get memory
|
| 429 |
+
memory = await get_current_memory(user_id, session_id)
|
| 430 |
+
|
| 431 |
+
# Get messages
|
| 432 |
+
messages = await memory.get_messages()
|
| 433 |
+
summary = await memory.get_summary()
|
| 434 |
+
|
| 435 |
+
logger.info("π Retrieved chat history", user_id=user_id, session_id=session_id)
|
| 436 |
+
|
| 437 |
+
return {
|
| 438 |
+
"success": True,
|
| 439 |
+
"summary": summary,
|
| 440 |
+
"messages": messages,
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
except HTTPException:
|
| 444 |
+
raise
|
| 445 |
+
except Exception as e:
|
| 446 |
+
logger.error("β Failed to get history", exc_info=e)
|
| 447 |
+
raise HTTPException(status_code=500, detail="Failed to retrieve history")
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
# ========== CLOSE SESSION ==========
|
| 451 |
+
@router.post("/close-session/{session_id}")
|
| 452 |
+
async def close_session(
|
| 453 |
+
session_id: str,
|
| 454 |
+
token: str = Depends(security),
|
| 455 |
+
):
|
| 456 |
+
"""
|
| 457 |
+
Close/clear a chat session
|
| 458 |
+
"""
|
| 459 |
+
|
| 460 |
+
try:
|
| 461 |
+
payload = decode_access_token(token.credentials)
|
| 462 |
+
if not payload:
|
| 463 |
+
raise HTTPException(status_code=401, detail="Invalid token")
|
| 464 |
+
|
| 465 |
+
user_id = payload["user_id"]
|
| 466 |
+
|
| 467 |
+
# Close session
|
| 468 |
+
from app.ai.memory.redis_context_memory import get_memory_manager
|
| 469 |
+
manager = get_memory_manager()
|
| 470 |
+
await manager.close_session(user_id, session_id)
|
| 471 |
+
|
| 472 |
+
logger.info("β Session closed", user_id=user_id, session_id=session_id)
|
| 473 |
+
|
| 474 |
+
return {
|
| 475 |
+
"success": True,
|
| 476 |
+
"message": "Session closed"
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
except HTTPException:
|
| 480 |
+
raise
|
| 481 |
+
except Exception as e:
|
| 482 |
+
logger.error("β Failed to close session", exc_info=e)
|
| 483 |
+
raise HTTPException(status_code=500, detail="Failed to close session")
|
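For reference (not part of the commit), a minimal sketch of calling the new /ask endpoint; the base URL, route prefix and token are placeholders, since the actual mount point is defined in main.py:

import httpx

BASE_URL = "http://localhost:7860"           # placeholder; use the deployed Space URL
HEADERS = {"Authorization": "Bearer <jwt>"}  # token must pass decode_access_token

payload = {
    "message": "I want to list my 2-bedroom apartment in Cotonou",
    "session_id": "demo-session",
    # Optional image upload, handled before intent routing:
    # "image_data": {"mime": "image/jpeg", "data": "<base64...>"},
}
resp = httpx.post(f"{BASE_URL}/ask", json=payload, headers=HEADERS)
print(resp.json())  # ChatResponse: {"success": ..., "text": ..., "action": ..., "state": ...}
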
app/ai/service.py
DELETED
@@ -1,90 +0,0 @@
# app/ai/service.py - COMPLETE SERVICE LAYER
import json
from typing import Dict, Any
from structlog import get_logger

from app.ai.graph import agent
from app.ai.memory.redis_memory import save_turn, load_history

logger = get_logger(__name__)


async def aida_chat_sync(
    user_id: str,
    user_role: str,
    human_msg: str,
) -> Dict[str, Any]:
    """
    Main synchronous chat endpoint

    This is the entry point called by the API

    Args:
        user_id: Current user ID
        user_role: 'landlord' or 'renter'
        human_msg: User's message

    Returns:
        Complete state dict with ai_reply, search_results, draft_preview, etc.
    """

    logger.info(f"💬 Chat request", user_id=user_id, role=user_role)

    try:
        # ===== Load Conversation History =====
        messages = await load_history(user_id)

        # Add current user message
        messages.append({"role": "user", "content": human_msg})

        # ===== Initialize State =====
        state = {
            "user_id": user_id,
            "user_role": user_role,
            "messages": messages,
            "intent": None,
            "status": "greeting",
            "allowed": False,
            "ai_reply": "",
            # Empty collections
            "missing_fields": [],
            "amenities": [],
            "image_urls": [],
            "search_results": [],
        }

        logger.info(f"Initial state created")

        # ===== Execute LangGraph Workflow =====
        logger.info(f"Executing LangGraph workflow")
        final_state = await agent.ainvoke(state)

        # ===== Save Conversation =====
        messages.append({"role": "assistant", "content": final_state.get("ai_reply", "")})
        await save_turn(user_id, messages)

        logger.info(
            f"✅ Chat complete",
            intent=final_state.get("intent"),
            status=final_state.get("status"),
        )

        # ===== Return Response =====
        return {
            "text": final_state.get("ai_reply", ""),
            "intent": final_state.get("intent"),
            "status": final_state.get("status"),
            "draft_preview": final_state.get("draft_preview"),
            "search_results": final_state.get("search_results", []),
            "missing_fields": final_state.get("missing_fields", []),
            "next_question": final_state.get("next_question"),
        }

    except Exception as e:
        logger.error(f"❌ Chat error", exc_info=e)
        return {
            "text": f"Sorry, I encountered an error: {str(e)}",
            "intent": "error",
            "status": "error",
            "error": str(e),
        }

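The removed aida_chat_sync entry point above was invoked like this (illustrative sketch; ids are made up and the old graph and Redis memory modules must be importable):

import asyncio
from app.ai.service import aida_chat_sync  # module removed by this commit

result = asyncio.run(aida_chat_sync(
    user_id="user-123",
    user_role="landlord",
    human_msg="Find me a 2-bedroom in Cotonou",
))
print(result["text"], result["intent"], result["status"])
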
app/ai/services/dynamic_role_manager.py
DELETED
@@ -1,90 +0,0 @@
# app/ai/services/dynamic_role_manager.py - FINAL FIXED VERSION
import logging
from typing import Tuple
from enum import Enum

logger = logging.getLogger(__name__)

class BaseRole(Enum):
    LANDLORD = "landlord"
    RENTER = "renter"

class DynamicRole(Enum):
    LANDLORD = "landlord"
    HOST = "host"
    SELLER = "seller"
    RENTER = "renter"
    GUEST = "guest"
    BUYER = "buyer"
    ROOMMATE_SEEKER = "roommate_seeker"
    ROOMMATE_LISTER = "roommate_lister"

class DynamicRoleManager:
    def get_dynamic_role_for_listing(
        self,
        base_role: str,
        listing_type: str
    ) -> Tuple[str, str, float]:
        base_role_lower = base_role.lower().strip()
        listing_type_lower = listing_type.lower().strip() if listing_type else None

        logger.info(f"Getting listing role: base='{base_role_lower}', type='{listing_type_lower}'")

        # ✅ FIXED: Default to 'rent' if listing_type is None
        if not listing_type_lower:
            listing_type_lower = "rent"

        if base_role_lower == "landlord":
            if listing_type_lower == "short-stay":
                return DynamicRole.HOST.value, "Short-stay property host", 0.99
            elif listing_type_lower == "sale":
                return DynamicRole.SELLER.value, "Property seller", 0.99
            elif listing_type_lower == "rent":
                return DynamicRole.LANDLORD.value, "Rental property landlord", 0.99
            elif listing_type_lower == "roommate":
                return None, "Landlords cannot list for roommate matching.", 0.0
            else:
                return DynamicRole.LANDLORD.value, "Rental property landlord", 0.99  # ✅ Default fallback

        elif base_role_lower == "renter":
            if listing_type_lower == "roommate":
                return DynamicRole.ROOMMATE_LISTER.value, "Room share lister", 0.99
            else:
                return None, "Renters can only list rooms for roommate matching.", 0.0

        logger.warning(f"Unknown base role: {base_role_lower}")
        return None, "Unknown role", 0.0

    def get_dynamic_role_for_search(
        self,
        base_role: str,
        search_for: str = None
    ) -> Tuple[str, str, float]:
        base_role_lower = base_role.lower().strip()
        search_for_lower = search_for.lower().strip() if search_for else None

        logger.info(f"Getting search role: base='{base_role_lower}', search_for='{search_for_lower}'")

        if base_role_lower == "landlord":
            logger.warning("Landlord searching for properties - unusual but allowed")
            return DynamicRole.RENTER.value, "Searching for rentals (unusual)", 0.5

        elif base_role_lower == "renter":
            if search_for_lower == "short-stay":
                return DynamicRole.GUEST.value, "Short-stay guest", 0.99
            elif search_for_lower == "sale":
                return DynamicRole.BUYER.value, "Property buyer", 0.99
            elif search_for_lower == "roommate":
                return DynamicRole.ROOMMATE_SEEKER.value, "Roommate seeker", 0.99
            else:
                return DynamicRole.RENTER.value, "Property searcher", 0.95

        return None, "Unknown role", 0.0

_manager = None

def get_dynamic_role_manager() -> DynamicRoleManager:
    global _manager
    if _manager is None:
        _manager = DynamicRoleManager()
    return _manager

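A quick illustration of the mapping this removed manager performed (example values only, not part of the commit):

manager = get_dynamic_role_manager()

role, reason, confidence = manager.get_dynamic_role_for_listing("landlord", "short-stay")
# -> ("host", "Short-stay property host", 0.99)

role, reason, confidence = manager.get_dynamic_role_for_listing("renter", "rent")
# -> (None, "Renters can only list rooms for roommate matching.", 0.0)
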
app/ai/services/property_listing_service.py
ADDED
|
@@ -0,0 +1,470 @@
|
# app/ai/services/property_listing_service.py - Unified Listing Service
"""
Complete property listing service that ties all modules together:
- Intent detection
- Field collection with auto-detection
- Image upload to Cloudflare
- Draft generation & preview
- Publish/Edit/Discard handling
- MongoDB integration
"""

from typing import Dict, Optional, Tuple
from structlog import get_logger
from datetime import datetime

# Import all modules
from app.ai.modules.property_listing import (
    start_listing,
    continue_listing,
    build_listing_state,
    generate_draft,
)
from app.ai.modules.image_upload import (
    process_image_upload,
    has_images,
    get_images_status,
    ask_for_image_upload,
)
from app.ai.modules.draft_preview import (
    generate_and_show_preview,
)
from app.ai.modules.post_draft_handler import (
    handle_post_draft_action,
)
from app.ai.intent_detector import (
    detect_intent_with_llm,
    build_smart_response,
)

logger = get_logger(__name__)


# ========== PROPERTY LISTING SERVICE ==========
class PropertyListingService:
    """
    Complete service for property listing workflow

    Manages:
    - Starting a new listing
    - Collecting fields
    - Uploading images
    - Generating drafts
    - Publishing/editing/discarding
    """

    def __init__(self):
        logger.info("π PropertyListingService initialized")

    # ========== 1. START LISTING ==========
    async def start_listing(
        self,
        user_id: str,
        user_role: str,
        language: str,
    ) -> Dict:
        """
        User says they want to list a property

        Returns:
            {
                "action": "show_example",
                "example": "Example text...",
                "state": {...}
            }
        """
        logger.info("π START_LISTING", user_id=user_id, user_role=user_role, language=language)

        result = await start_listing(user_id, user_role, language)

        return {
            "action": "show_example",
            "example": result["example"],
            "state": result["state"],
        }

    # ========== 2. COLLECT FIELDS & AUTO-DETECT ==========
    async def continue_listing(
        self,
        user_input: str,
        current_state: Dict,
    ) -> Dict:
        """
        User provides property information

        Extracts fields, auto-detects price_type/listing_type/currency

        Returns:
        - If missing fields: Ask for next field
        - If all fields collected: Ask for images
        """
        logger.info("π CONTINUE_LISTING", input=user_input[:50])

        user_id = current_state.get("user_id")
        user_role = current_state.get("user_role")
        language = current_state.get("language", "en")

        # Build updated state
        updated_state = await build_listing_state(
            user_id=user_id,
            user_role=user_role,
            language=language,
            user_input=user_input,
            current_state=current_state.get("raw_state", {}),
        )

        missing_fields = updated_state["missing_fields"]

        if missing_fields:
            # Still missing required fields
            logger.info("π Missing required fields", missing=missing_fields)

            from app.ai.modules.property_listing import generate_smart_question

            next_field = missing_fields[0]
            question = await generate_smart_question(next_field, language)

            return {
                "action": "ask_next_field",
                "question": question,
                "state": updated_state,
            }
        else:
            # All required fields collected - check for images
            logger.info("β All required fields collected")

            if has_images(updated_state["raw_state"]):
                # Images already uploaded, ready for draft
                logger.info("πΈ Images already present, generating draft")
                return await self._generate_draft_internal(updated_state)
            else:
                # Ask for images
                logger.info("πΈ Asking for images")

                return {
                    "action": "ask_for_images",
                    "message": ask_for_image_upload(language),
                    "state": {
                        **updated_state,
                        "status": "waiting_for_images",
                    },
                }

    # ========== 3. UPLOAD IMAGE ==========
    async def upload_image(
        self,
        image_base64: str,
        mime_type: str,
        current_state: Dict,
    ) -> Dict:
        """
        User uploads an image

        - Upload to Cloudflare
        - Get URL back
        - Save to state
        - Check if ready for draft
        """
        logger.info("πΈ UPLOAD_IMAGE", mime_type=mime_type)

        language = current_state.get("language", "en")

        # Upload to Cloudflare
        upload_result = await process_image_upload(
            image_base64=image_base64,
            mime_type=mime_type,
            state=current_state.get("raw_state", {}),
            language=language,
        )

        if not upload_result["success"]:
            # Upload failed
            logger.error("β Image upload failed")
            return {
                "action": "upload_error",
                "message": upload_result["message"],
                "state": current_state,
            }

        # Upload successful
        updated_state = current_state.copy()
        updated_state["raw_state"] = upload_result["state"]

        logger.info("β Image uploaded", total=upload_result["total_images"])

        # Check images status
        images_status = get_images_status(upload_result["state"], language)

        return {
            "action": "image_uploaded",
            "message": upload_result["message"],
            "total_images": upload_result["total_images"],
            "image_url": upload_result["image_url"],
            "state": updated_state,
        }

    # ========== 4. GENERATE DRAFT (INTERNAL) ==========
    async def _generate_draft_internal(self, state: Dict) -> Dict:
        """
        Internal helper to generate draft
        """
        logger.info("π GENERATE_DRAFT_INTERNAL")

        # Generate title and description
        draft_result = await generate_draft(state)

        draft = draft_result["draft"]
        user_id = state.get("user_id")
        language = state.get("language", "en")

        # Generate and show preview
        preview_result = await generate_and_show_preview(user_id, draft, language)

        return {
            "action": "show_preview",
            "preview": preview_result["preview_text"],
            "draft": preview_result["draft"],
            "state": preview_result["state"],
        }

    # ========== 5. GENERATE DRAFT (USER COMMAND) ==========
    async def generate_draft(self, current_state: Dict) -> Dict:
        """
        User says "generate preview" or "show draft"

        Checks if images present, then generates draft
        """
        logger.info("π GENERATE_DRAFT")

        language = current_state.get("language", "en")
        raw_state = current_state.get("raw_state", {})

        # Check if images present
        images_status = get_images_status(raw_state, language)

        if not images_status["has_images"]:
            # No images yet
            logger.warning("β οΈ No images, can't generate draft")
            return {
                "action": "error",
                "message": images_status["message"],
                "state": current_state,
            }

        # Generate draft
        return await self._generate_draft_internal(current_state)

    # ========== 6. HANDLE POST-DRAFT ACTIONS ==========
    async def handle_post_draft_action(
        self,
        user_input: str,
        current_state: Dict,
    ) -> Dict:
        """
        User is viewing draft preview and takes an action:
        - publish
        - edit
        - discard
        - add_image
        """
        logger.info("π HANDLE_POST_DRAFT_ACTION", input=user_input[:50])

        user_id = current_state.get("user_id")
        language = current_state.get("language", "en")
        draft = current_state.get("draft", {})

        # Handle the action
        result = await handle_post_draft_action(user_input, draft, user_id, language)

        # ========== PUBLISH ==========
        if result["action"] == "published":
            logger.info("β Listing published", mongo_id=result["mongo_id"])

            return {
                "action": "published",
                "success": True,
                "mongo_id": result["mongo_id"],
                "message": result["message"],
                "next_state": {
                    "status": "idle",
                    "draft": None,
                    "images": [],
                },
            }

        # ========== PUBLISH ERROR ==========
        elif result["action"] == "publish_error":
            logger.error("β Publish failed", message=result["message"])

            return {
                "action": "publish_error",
                "success": False,
                "message": result["message"],
                "state": {
                    **current_state,
                    "draft": result["draft"],
                },
            }

        # ========== EDIT ==========
        elif result["action"] in ["ask_which_field", "ask_new_value", "show_updated_preview"]:
            logger.info("βοΈ Edit action", sub_action=result["action"])

            if result["action"] == "show_updated_preview":
                # Field was updated, regenerate preview
                updated_state = {**current_state, "draft": result["draft"]}
                preview_result = await generate_and_show_preview(user_id, result["draft"], language)

                return {
                    "action": "show_updated_preview",
                    "message": result["message"],
                    "preview": preview_result["preview_text"],
                    "field": result["field"],
                    "state": preview_result["state"],
                }
            else:
                # Need more info from user
                return {
                    "action": result["action"],
                    "message": result["message"],
                    "field": result.get("field"),
                    "state": current_state,
                }

        # ========== DISCARD ==========
        elif result["action"] == "discarded":
            logger.info("β Draft discarded")

            return {
                "action": "discarded",
                "message": result["message"],
                "next_state": result["next_state"],
            }

        # ========== ADD IMAGE ==========
        elif result["action"] == "add_image":
            logger.info("πΈ Adding more images")

            return {
                "action": "add_image",
                "message": result["message"],
                "state": current_state,
            }

        # ========== UNCLEAR ==========
        else:
            logger.warning("β οΈ Unclear action")

            return {
                "action": "unclear",
                "message": result["message"],
                "state": current_state,
            }

    # ========== 7. GET STATE STATUS ==========
    def get_status(self, state: Dict) -> Dict:
        """
        Get current status of listing process
        """

        status = state.get("status", "unknown")
        language = state.get("language", "en")

        raw_state = state.get("raw_state", {})
        images_status = get_images_status(raw_state, language)

        return {
            "status": status,
            "provided_fields": state.get("provided_fields", {}),
            "missing_fields": state.get("missing_fields", []),
            "images": {
                "count": images_status["count"],
                "has_images": images_status["has_images"],
                "can_generate_draft": images_status["can_generate_draft"],
            },
        }


# ========== EXAMPLE USAGE ==========
"""
# Initialize service
service = PropertyListingService()

# Step 1: User says "I want to list"
result = await service.start_listing(
    user_id="user123",
    user_role="landlord",
    language="en"
)
# Shows example, returns initial state

# Step 2: User provides property details
result = await service.continue_listing(
    user_input="I have a 2-bed apartment in Lagos for 50k monthly with wifi",
    current_state=result["state"]
)
# If all fields complete and images present β show preview
# If missing fields β ask for next field
# If fields complete but no images β ask for images

# Step 3: User uploads image
result = await service.upload_image(
    image_base64="base64_encoded_image",
    mime_type="image/jpeg",
    current_state=result["state"]
)
# Image uploaded to Cloudflare, URL saved

# Step 4: Generate draft (if images present)
result = await service.generate_draft(current_state=result["state"])
# Shows formatted preview

# Step 5: User takes action (publish/edit/discard)
result = await service.handle_post_draft_action(
    user_input="Publish this",
    current_state=result["state"]
)
# Publishes to MongoDB, returns mongo_id
"""


# ========== TEST ==========
if __name__ == "__main__":
    import asyncio

    async def test():
        print("\n" + "="*70)
        print("π§ͺ UNIFIED PROPERTY LISTING SERVICE TEST")
        print("="*70 + "\n")

        service = PropertyListingService()

        # Test 1: Start listing
        print("1οΈβ£ Start listing")
        result = await service.start_listing(
            user_id="user123",
            user_role="landlord",
            language="en"
        )
        print(f" Action: {result['action']}")
        print(f" Example: {result['example'][:100]}...\n")
        state = result["state"]

        # Test 2: Continue listing
        print("2οΈβ£ Continue listing - provide property info")
        result = await service.continue_listing(
            user_input="I have a 2-bed, 1-bath apartment in Lagos for 50k monthly with wifi and parking",
            current_state=state
        )
        print(f" Action: {result['action']}")
        print(f" Message: {result.get('message', result.get('question', ''))[:80]}...\n")
        state = result["state"]

        # Test 3: Check status
        print("3οΈβ£ Check status")
        status = service.get_status(state)
        print(f" Status: {status['status']}")
        print(f" Missing fields: {status['missing_fields']}")
        print(f" Has images: {status['images']['has_images']}\n")

    asyncio.run(test())

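The chat route that actually drives this service (app/ai/routes/chat.py) is changed in this commit but not reproduced here, so the following is only a minimal sketch of how a route handler could call PropertyListingService. The route path "/listing-demo", the ChatRequest model, and the in-memory _states dict are assumptions for illustration; the real route keeps per-user state in the Redis context memory instead.

# Hypothetical wiring sketch - not part of the commit; the real route lives in app/ai/routes/chat.py.
from typing import Dict
from fastapi import APIRouter
from pydantic import BaseModel

from app.ai.services.property_listing_service import PropertyListingService

router = APIRouter()
service = PropertyListingService()

# Assumption: a simple per-user state store; the actual code uses Redis-backed memory.
_states: Dict[str, Dict] = {}


class ChatRequest(BaseModel):
    user_id: str
    user_role: str = "landlord"
    language: str = "en"
    message: str


@router.post("/listing-demo")
async def listing_demo(req: ChatRequest) -> Dict:
    state = _states.get(req.user_id)
    if state is None:
        # First turn: start the listing flow and show the example prompt
        result = await service.start_listing(req.user_id, req.user_role, req.language)
    else:
        # Later turns: keep collecting fields until a draft can be generated
        result = await service.continue_listing(req.message, state)
    _states[req.user_id] = result["state"]
    reply = result.get("question") or result.get("example") or result.get("message", "")
    return {"action": result["action"], "reply": reply}
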
app/ai/state.py
DELETED
@@ -1,53 +0,0 @@
# app/ai/state.py - Complete State Definition for LangGraph
from typing import TypedDict, List, Optional, Dict, Any

class ChatState(TypedDict, total=False):
    """Complete state for Aida AI conversation"""

    # ============ User Info ============
    user_id: str                    # Current user ID
    user_role: str                  # 'landlord' or 'renter'
    user_language: str              # Detected language (English, French, Yoruba, etc)

    # ============ Conversation ============
    messages: List[Dict[str, str]]  # Chat history: [{"role": "user"/"assistant", "content": "..."}]

    # ============ Intent & Status ============
    intent: str                     # 'list' | 'search' | 'greeting' | 'my_listings'
    status: str                     # Current flow state
    allowed: bool                   # Permission check passed?

    # ============ Listing Creation Fields ============
    location: Optional[str]         # City name
    bedrooms: Optional[int]         # Number of bedrooms
    bathrooms: Optional[int]        # Number of bathrooms
    price: Optional[float]          # Price amount
    price_type: Optional[str]       # 'monthly' | 'nightly' | 'yearly'
    listing_type: Optional[str]     # 'rent' | 'short-stay' | 'sale' | 'roommate'
    amenities: List[str]            # List of amenities
    requirements: Optional[str]     # Special requirements
    currency: Optional[str]         # Currency code (XOF, NGN, EUR, etc)

    # ============ Search Fields ============
    search_query: Optional[str]     # User's search query
    min_price: Optional[float]      # Minimum price filter
    max_price: Optional[float]      # Maximum price filter

    # ============ Collection Flow ============
    missing_fields: List[str]       # Fields still needed
    next_question: Optional[str]    # Next question to ask user

    # ============ Draft & Preview ============
    draft_preview: Optional[Dict[str, Any]]  # Formatted draft shown to user

    # ============ Publishing ============
    mongo_id: Optional[str]         # MongoDB ID after publish

    # ============ Images ============
    image_urls: List[str]           # Cloudflare image URLs

    # ============ Results ============
    search_results: List[Dict[str, Any]]  # Search results

    # ============ AI Response ============
    ai_reply: str                   # What Aida says to user
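For reference, a populated state under this removed schema looked roughly like the dict below. The values are illustrative only (borrowed from the usage example earlier in this commit); the new modules appear to pass plain dicts and Redis-backed state instead of this TypedDict.

# Illustrative only: app/ai/state.py is deleted in this commit. This just shows the
# shape a ChatState dict had (TypedDict with total=False, so every key is optional).
from typing import Any, Dict

example_state: Dict[str, Any] = {
    "user_id": "user123",
    "user_role": "landlord",
    "user_language": "English",
    "intent": "list",
    "status": "collecting_fields",
    "location": "Lagos",
    "bedrooms": 2,
    "price": 50000.0,
    "price_type": "monthly",
    "currency": "NGN",
    "amenities": ["wifi", "parking"],
    "missing_fields": ["bathrooms"],
    "ai_reply": "How many bathrooms does it have?",
}

print(example_state["missing_fields"])  # -> ['bathrooms']
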
main.py
CHANGED
@@ -1,8 +1,15 @@
# app/main.py - FastAPI + Aida AI (Updated with new modular architecture)
"""
Lojiz Platform with Aida AI - Modular Architecture
- Intent detection
- Property listing service
- Image upload (Cloudflare)
- Draft & preview
- Redis memory
- MongoDB integration
"""

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
@@ -10,7 +17,11 @@ from contextlib import asynccontextmanager
import logging
import os

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# ========== IMPORTS ==========
try:
    from app.config import settings
    from app.database import connect_db, disconnect_db, ensure_indexes as ensure_auth_indexes
@@ -19,106 +30,148 @@ try:
    from app.core.exceptions import AuthException
    setup_logger()
except ImportError as e:
    logger.error(f"β Core import error: {e}")
    class AuthException(Exception):
        """Fallback AuthException"""
        def __init__(self, message="Auth error", error_code="AUTH_ERROR", status_code=401, data=None):
            self.message = message
            self.error_code = error_code
            self.status_code = status_code
            self.data = data
            super().__init__(self.message)

# ========== NEW AIDA IMPORTS ==========
from app.ai.routes.chat import router as ai_chat_router
from app.ai.config import (
    validate_ai_startup,
    check_redis_health,
    check_qdrant_health,
    redis_client,
    qdrant_client,
)
from app.ai.memory.redis_context_memory import get_memory_manager
from app.ml.models.ml_listing_extractor import get_ml_extractor
from app.models.listing import ensure_listing_indexes

logger = logging.getLogger(__name__)

# ========== ENVIRONMENT ==========
environment = os.getenv("ENVIRONMENT", "development")
is_production = environment == "production"

# ========== LIFESPAN ==========
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Application lifespan - startup and shutdown
    """

    logger.info("=" * 70)
    logger.info("π Starting Lojiz Platform + Aida AI (Modular Architecture)")
    logger.info("=" * 70)

    # ========== STARTUP ==========
    try:
        # 1. Connect to MongoDB - CRITICAL
        logger.info("π¦ Connecting to MongoDB...")
        await connect_db()
        await ensure_auth_indexes()
        await ensure_listing_indexes()
        logger.info("β MongoDB connected & indexed")
    except Exception as e:
        logger.critical(f"β MongoDB connection failed - aborting startup: {e}")
        raise

    # 2. Redis - IMPORTANT
    try:
        logger.info("π΄ Connecting to Redis...")
        if redis_client:
            await redis_client.ping()
            logger.info("β Redis connected")
        else:
            logger.warning("β οΈ Redis not available (optional)")
    except Exception as e:
        logger.warning(f"β οΈ Redis connection failed (continuing without): {e}")

    # 3. Qdrant - IMPORTANT
    try:
        logger.info("π¦ Connecting to Qdrant...")
        if qdrant_client:
            await qdrant_client.get_collections()
            logger.info("β Qdrant connected")
        else:
            logger.warning("β οΈ Qdrant not available (optional)")
    except Exception as e:
        logger.warning(f"β οΈ Qdrant connection failed (continuing without): {e}")

    # 4. Validate AI Components
    try:
        logger.info("π§ Validating AI components...")
        ai_checks = await validate_ai_startup()
        logger.info("β AI components validated")
    except Exception as e:
        logger.warning(f"β οΈ AI validation failed: {e}")

    # 5. ML Extractor
    try:
        logger.info("π€ Initializing ML Extractor...")
        ml = get_ml_extractor()
        logger.info("β ML Extractor ready")
    except Exception as e:
        logger.warning(f"β οΈ ML Extractor initialization failed: {e}")

    # 6. Memory Manager
    try:
        logger.info("πΎ Initializing Memory Manager...")
        manager = get_memory_manager()
        logger.info("β Memory Manager ready")
    except Exception as e:
        logger.warning(f"β οΈ Memory Manager initialization failed: {e}")

    logger.info("=" * 70)
    logger.info("β APPLICATION READY - All systems operational!")
    logger.info("=" * 70)

    yield

    # ========== SHUTDOWN ==========
    logger.info("=" * 70)
    logger.info("π Shutting down Lojiz Platform + Aida AI")
    logger.info("=" * 70)

    try:
        # Clear ML caches
        try:
            ml = get_ml_extractor()
            ml.currency_mgr.clear_cache()
            logger.info("β ML caches cleared")
        except:
            pass

        # Disconnect from MongoDB
        from app.database import disconnect_db
        await disconnect_db()
        logger.info("β MongoDB disconnected")

        # Close Redis
        if redis_client:
            await redis_client.close()
            logger.info("β Redis closed")

        logger.info("β Shutdown complete")
    except Exception as e:
        logger.warning(f"β οΈ Shutdown warning: {e}")


# ========== FASTAPI SETUP ==========
app = FastAPI(
    title="Lojiz Platform + Aida AI",
    description="Real-estate platform with conversational AI assistant",
    version="1.0.0",
    lifespan=lifespan,
)

# ========== CORS ==========
cors_origins = [
    "https://lojiz.onrender.com",
    "https://lojiz.com",
@@ -127,34 +180,26 @@ cors_origins = [
    "http://localhost",
    "http://localhost:3000",
    "http://localhost:5173",
    "http://127.0.0.1",
    "http://127.0.0.1:3000",
    "http://127.0.0.1:5173",
    "http://127.0.0.1:8080",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=cors_origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"],
    max_age=600,
)

# ========== EXCEPTION HANDLERS ==========
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
    logger.error(f"β Validation error: {exc}")
    errors = []
    for error in exc.errors():
        field = ".".join(str(loc) for loc in error["loc"][1:])
@@ -163,23 +208,23 @@ async def validation_exception_handler(request: Request, exc: RequestValidationError):
        status_code=400,
        content={
            "success": False,
            "errors": errors,
        },
    )


@app.exception_handler(AuthException)
async def auth_exception_handler(request: Request, exc: AuthException):
    logger.warning(f"π Auth error [{exc.error_code}]: {exc.message}")
    response = {"success": False, "message": exc.message, "error_code": exc.error_code}
    if exc.data:
        response["data"] = exc.data
    return JSONResponse(status_code=exc.status_code, content=response)


@app.exception_handler(BaseException)
async def general_exception_handler(request: Request, exc: BaseException):
    logger.error(f"β Unexpected error: {str(exc)}", exc_info=True)
    return JSONResponse(
        status_code=500,
        content={
@@ -190,52 +235,119 @@ async def general_exception_handler(request: Request, exc: Exception):
        },
    )


# ========== ROUTERS ==========
logger.info("π‘ Registering routers...")

# Authentication routes
app.include_router(auth.router, prefix="/api/auth", tags=["Authentication"])

# Aida AI Chat routes
app.include_router(ai_chat_router, prefix="/ai", tags=["Aida AI Chat"])

logger.info("β All routers registered")


# ========== ENDPOINTS ==========

@app.get("/health", tags=["Health"])
async def health_check():
    """
    Health check endpoint
    """

    try:
        # Check Redis
        redis_ok = False
        if redis_client:
            try:
                await redis_client.ping()
                redis_ok = True
            except:
                redis_ok = False

        # Check Qdrant
        qdrant_ok = False
        if qdrant_client:
            try:
                await qdrant_client.get_collections()
                qdrant_ok = True
            except:
                qdrant_ok = False

        # Check ML
        try:
            ml = get_ml_extractor()
            ml_ok = ml is not None
        except:
            ml_ok = False

        return {
            "status": "healthy",
            "service": "Lojiz Platform + Aida AI",
            "version": "1.0.0",
            "environment": environment,
            "components": {
                "mongodb": "connected",
                "redis": "connected" if redis_ok else "disconnected",
                "qdrant": "connected" if qdrant_ok else "disconnected",
                "ml": "ready" if ml_ok else "not ready",
            }
        }
    except Exception as e:
        logger.error(f"β Health check failed: {e}")
        return {
            "status": "unhealthy",
            "error": str(e),
        }


@app.get("/", tags=["Root"])
async def root():
    """
    Root endpoint - API information
    """
    return {
        "message": "Welcome to Lojiz Platform + Aida AI",
        "docs": "/docs",
        "health": "/health",
        "environment": environment,
        "version": "1.0.0",
        "description": "Real-estate platform with conversational AI assistant (Aida)",
    }


@app.options("/{full_path:path}", include_in_schema=False)
async def options_handler(full_path: str):
    """
    Handle CORS preflight requests
    """
    return JSONResponse(status_code=200, content={})


# ========== STARTUP LOGGING ==========
@app.on_event("startup")
async def startup_event():
    """Log startup"""
    logger.info("π FastAPI application started")


@app.on_event("shutdown")
async def shutdown_event():
    """Log shutdown"""
    logger.info("π FastAPI application shutdown")


# ========== RUN ==========
"""
To run this application:

Development:
    uvicorn app.main:app --reload

Production:
    gunicorn -w 4 -k uvicorn.workers.UvicornWorker app.main:app

HF Spaces:
    python app.py
"""
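Once the server is up (for example via python app.py as in the Spaces entry point), the /health endpoint above doubles as a quick smoke test. The snippet below is only a sketch of such a check, not part of the commit; it assumes httpx is installed and that the server listens on the default port 7860 used by app.py.

# Hypothetical smoke test for the /health endpoint defined in main.py.
# Assumes the server is already running locally (e.g. `python app.py` on port 7860).
import httpx

def check_health(base_url: str = "http://localhost:7860") -> bool:
    resp = httpx.get(f"{base_url}/health", timeout=10.0)
    resp.raise_for_status()
    payload = resp.json()
    # The endpoint reports an overall status plus per-component states
    print(payload["status"], payload.get("components", {}))
    return payload["status"] == "healthy"

if __name__ == "__main__":
    check_health()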