Spaces:
Running
Running
Commit
·
79ef7e1
1
Parent(s):
46d04ec
Deploy Lojiz Platform with Aida AI backend
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitignore +0 -0
- .prettierrc +4 -0
- Dockerfile +35 -0
- README.md +375 -0
- app.py +33 -0
- app/__init__.py +4 -0
- app/__pycache__/__init__.cpython-312.pyc +0 -0
- app/__pycache__/config.cpython-312.pyc +0 -0
- app/__pycache__/database.cpython-312.pyc +0 -0
- app/__pycache__/main.cpython-312.pyc +0 -0
- app/ai/__init__.py +0 -0
- app/ai/__pycache__/__init__.cpython-312.pyc +0 -0
- app/ai/__pycache__/config.cpython-312.pyc +0 -0
- app/ai/__pycache__/graph.cpython-312.pyc +0 -0
- app/ai/__pycache__/service.cpython-312.pyc +0 -0
- app/ai/__pycache__/state.cpython-312.pyc +0 -0
- app/ai/config.py +66 -0
- app/ai/graph.py +87 -0
- app/ai/memory/__init__.py +0 -0
- app/ai/memory/__pycache__/__init__.cpython-312.pyc +0 -0
- app/ai/memory/__pycache__/redis_memory.cpython-312.pyc +0 -0
- app/ai/memory/redis_memory.py +31 -0
- app/ai/nodes/__init__.py +0 -0
- app/ai/nodes/__pycache__/__init__.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/draft_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/image_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/intent_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/publish_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/role_gate_node.cpython-312.pyc +0 -0
- app/ai/nodes/__pycache__/search_node.cpython-312.pyc +0 -0
- app/ai/nodes/draft_node.py +161 -0
- app/ai/nodes/image_node.py +63 -0
- app/ai/nodes/intent_node.py +366 -0
- app/ai/nodes/publish_node.py +98 -0
- app/ai/nodes/role_gate_node.py +51 -0
- app/ai/nodes/search_node.py +298 -0
- app/ai/prompts/system_prompt.txt +241 -0
- app/ai/routes/__pycache__/chat.cpython-312.pyc +0 -0
- app/ai/routes/chat.py +37 -0
- app/ai/service.py +68 -0
- app/ai/state.py +106 -0
- app/ai/tools/__init__.py +0 -0
- app/ai/tools/price_suggest.py +0 -0
- app/ai/tools/validate_location.py +0 -0
- app/ai/utils/__pycache__/intent_extractor.cpython-312.pyc +0 -0
- app/ai/utils/intent_extractor.py +66 -0
- app/ai/vector/__init__.py +0 -0
- app/ai/vector/qdrant_client.py +0 -0
- app/config.py +112 -0
- app/core/__init__.py +1 -0
.gitignore
ADDED
|
Binary file (698 Bytes). View file
|
|
|
.prettierrc
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"singleQuote": true,
|
| 3 |
+
"trailingComma": "all"
|
| 4 |
+
}
|
Dockerfile
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ============================================================
# Dockerfile - For Render Deployment
# ============================================================

FROM python:3.11-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first so dependency layers cache independently of app code
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY . .

# Create non-root user
RUN useradd -m appuser && chown -R appuser:appuser /app
USER appuser

# Expose port
EXPOSE 8000

# Health check
# FIX: a bare requests.get() succeeds even when the server answers HTTP 500,
# so the container would be reported healthy while the app is failing.
# raise_for_status() makes any non-2xx response fail the check, and the
# timeout keeps a hung server from stalling the probe past Docker's own limit.
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD python -c "import requests; requests.get('http://localhost:8000/health', timeout=5).raise_for_status()"

# Run application
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
|
README.md
CHANGED
|
@@ -11,4 +11,379 @@ license: mit
|
|
| 11 |
short_description: The ai model for Lojiz
|
| 12 |
---
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 11 |
short_description: The ai model for Lojiz
|
| 12 |
---
|
| 13 |
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
# Lojiz Authentication API - Python FastAPI Edition
|
| 17 |
+
|
| 18 |
+
Modern, secure, and production-ready authentication backend built with FastAPI, MongoDB, and Resend.
|
| 19 |
+
|
| 20 |
+
## Features
|
| 21 |
+
|
| 22 |
+
✅ **Dual Authentication** - Email or phone-based signup & login
|
| 23 |
+
✅ **OTP Verification** - 4-digit OTP with configurable expiry (15 min default)
|
| 24 |
+
✅ **Password Reset** - Secure password reset flow with temporary tokens
|
| 25 |
+
✅ **JWT Tokens** - 60-day login tokens + 10-minute reset tokens
|
| 26 |
+
✅ **Bcrypt Hashing** - Industry-standard password hashing
|
| 27 |
+
✅ **Email Templates** - Beautiful, responsive HTML email templates via Resend
|
| 28 |
+
✅ **Rate Limiting** - OTP attempt limits (5 max attempts)
|
| 29 |
+
✅ **MongoDB** - Async MongoDB with Motor driver
|
| 30 |
+
✅ **API Documentation** - Auto-generated Swagger docs
|
| 31 |
+
✅ **Production Ready** - Error handling, logging, security best practices
|
| 32 |
+
|
| 33 |
+
## Prerequisites
|
| 34 |
+
|
| 35 |
+
- Python 3.11+
|
| 36 |
+
- MongoDB Atlas account (free tier available)
|
| 37 |
+
- Resend account (for email sending)
|
| 38 |
+
- Git & GitHub account
|
| 39 |
+
- Render.com account (for deployment)
|
| 40 |
+
|
| 41 |
+
## Local Development Setup
|
| 42 |
+
|
| 43 |
+
### 1. Clone Repository
|
| 44 |
+
```bash
|
| 45 |
+
git clone https://github.com/yourusername/lojiz-auth-api.git
|
| 46 |
+
cd lojiz-auth-api
|
| 47 |
+
```
|
| 48 |
+
|
| 49 |
+
### 2. Create Virtual Environment
|
| 50 |
+
```bash
|
| 51 |
+
python3 -m venv venv
|
| 52 |
+
source venv/bin/activate  # On Windows: venv\Scripts\activate
|
| 53 |
+
```
|
| 54 |
+
|
| 55 |
+
### 3. Install Dependencies
|
| 56 |
+
```bash
|
| 57 |
+
pip install -r requirements.txt
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
### 4. Setup Environment Variables
|
| 61 |
+
```bash
|
| 62 |
+
cp .env.example .env
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
Edit `.env` with:
|
| 66 |
+
```
|
| 67 |
+
DEBUG=True
|
| 68 |
+
ENVIRONMENT=development
|
| 69 |
+
MONGODB_URL=mongodb://localhost:27017
|
| 70 |
+
MONGODB_DATABASE=lojiz
|
| 71 |
+
JWT_SECRET=your-secret-key-here
|
| 72 |
+
RESEND_API_KEY=your-resend-api-key
|
| 73 |
+
RESEND_FROM_EMAIL=noreply@yourdomain.com
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
### 5. Run Application
|
| 77 |
+
```bash
|
| 78 |
+
uvicorn app.main:app --reload
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
Visit: http://localhost:8000/docs (Swagger UI)
|
| 82 |
+
|
| 83 |
+
## Project Structure
|
| 84 |
+
|
| 85 |
+
```
|
| 86 |
+
lojiz-auth-api/
|
| 87 |
+
├── app/
|
| 88 |
+
│ ├── core/
|
| 89 |
+
│ │ ├── security.py # JWT & password hashing
|
| 90 |
+
│ │ └── schemas.py # Pydantic models
|
| 91 |
+
│ ├── database.py # MongoDB connection
|
| 92 |
+
│ ├── config.py # Configuration
|
| 93 |
+
│ ├── models/
|
| 94 |
+
│ │ ├── user.py # User model
|
| 95 |
+
│ │ └── otp.py # OTP model
|
| 96 |
+
│ ├── routes/
|
| 97 |
+
│ │ └── auth.py # Auth endpoints
|
| 98 |
+
│ ├── services/
|
| 99 |
+
│ │ ├── auth_service.py # Auth logic
|
| 100 |
+
│ │ ├── otp_service.py # OTP logic
|
| 101 |
+
│ │ └── user_service.py # User logic
|
| 102 |
+
│ ├── schemas/
|
| 103 |
+
│ │ ├── auth.py # Auth DTOs
|
| 104 |
+
│ │ └── user.py # User DTOs
|
| 105 |
+
│ ├── guards/
|
| 106 |
+
│ │ └── jwt_guard.py # JWT auth
|
| 107 |
+
│ ├── utils/
|
| 108 |
+
│ │ └── logger.py # Logging
|
| 109 |
+
│ └── main.py # App entry point
|
| 110 |
+
├── requirements.txt
|
| 111 |
+
├── .env.example
|
| 112 |
+
├── .gitignore
|
| 113 |
+
├── Dockerfile
|
| 114 |
+
├── render.yaml
|
| 115 |
+
└── README.md
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
## API Endpoints
|
| 119 |
+
|
| 120 |
+
### Authentication
|
| 121 |
+
|
| 122 |
+
**POST** `/api/auth/signup`
|
| 123 |
+
- Create new account
|
| 124 |
+
- Returns: Confirmation to check email/phone for OTP
|
| 125 |
+
|
| 126 |
+
**POST** `/api/auth/verify-signup-otp`
|
| 127 |
+
- Verify signup OTP
|
| 128 |
+
- Returns: User data + JWT token
|
| 129 |
+
|
| 130 |
+
**POST** `/api/auth/login`
|
| 131 |
+
- Authenticate with email/phone + password
|
| 132 |
+
- Returns: User data + JWT token
|
| 133 |
+
|
| 134 |
+
**POST** `/api/auth/send-password-reset-otp`
|
| 135 |
+
- Request password reset
|
| 136 |
+
- Returns: Generic success (doesn't reveal if email exists)
|
| 137 |
+
|
| 138 |
+
**POST** `/api/auth/verify-password-reset-otp`
|
| 139 |
+
- Verify password reset OTP
|
| 140 |
+
- Returns: Temporary reset token
|
| 141 |
+
|
| 142 |
+
**POST** `/api/auth/reset-password`
|
| 143 |
+
- Reset password with token
|
| 144 |
+
- Header: `x-reset-token`
|
| 145 |
+
|
| 146 |
+
**POST** `/api/auth/resend-otp`
|
| 147 |
+
- Resend OTP for signup or password reset
|
| 148 |
+
|
| 149 |
+
### User Profile
|
| 150 |
+
|
| 151 |
+
**GET** `/api/auth/profile`
|
| 152 |
+
- Get current user profile
|
| 153 |
+
- Requires: Bearer token
|
| 154 |
+
|
| 155 |
+
**POST** `/api/auth/logout`
|
| 156 |
+
- Logout (client removes token)
|
| 157 |
+
- Requires: Bearer token
|
| 158 |
+
|
| 159 |
+
## MongoDB Setup
|
| 160 |
+
|
| 161 |
+
### 1. Create MongoDB Atlas Account
|
| 162 |
+
- Go to https://www.mongodb.com/cloud/atlas
|
| 163 |
+
- Sign up for free
|
| 164 |
+
- Create a project
|
| 165 |
+
|
| 166 |
+
### 2. Create Cluster
|
| 167 |
+
- Choose shared cluster (free)
|
| 168 |
+
- Select region closest to your users
|
| 169 |
+
- Create cluster
|
| 170 |
+
|
| 171 |
+
### 3. Get Connection String
|
| 172 |
+
- Click "Connect"
|
| 173 |
+
- Choose "Drivers"
|
| 174 |
+
- Copy connection string
|
| 175 |
+
- Replace `<password>` and `myFirstDatabase` with actual values
|
| 176 |
+
|
| 177 |
+
### 4. Update .env
|
| 178 |
+
```
|
| 179 |
+
MONGODB_URL=mongodb+srv://username:password@cluster.mongodb.net/lojiz?retryWrites=true&w=majority
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
### 5. Create Database Indexes (Auto-created on startup)
|
| 183 |
+
- Email (unique, sparse)
|
| 184 |
+
- Phone (unique, sparse)
|
| 185 |
+
- Role
|
| 186 |
+
- OTP TTL (15 minutes)
|
| 187 |
+
|
| 188 |
+
## Resend Email Setup
|
| 189 |
+
|
| 190 |
+
### 1. Create Resend Account
|
| 191 |
+
- Go to https://resend.com
|
| 192 |
+
- Sign up
|
| 193 |
+
- Get API key from dashboard
|
| 194 |
+
|
| 195 |
+
### 2. Verify Domain (Optional for Production)
|
| 196 |
+
- Add domain to Resend
|
| 197 |
+
- Update DNS records
|
| 198 |
+
- Verify domain
|
| 199 |
+
|
| 200 |
+
### 3. Update .env
|
| 201 |
+
```
|
| 202 |
+
RESEND_API_KEY=re_xxxxxxxxxxxxxxxxxxxx
|
| 203 |
+
RESEND_FROM_EMAIL=noreply@yourdomain.com
|
| 204 |
+
```
|
| 205 |
+
|
| 206 |
+
## Password Requirements
|
| 207 |
+
|
| 208 |
+
Passwords must contain:
|
| 209 |
+
- Minimum 8 characters
|
| 210 |
+
- At least one uppercase letter (A-Z)
|
| 211 |
+
- At least one lowercase letter (a-z)
|
| 212 |
+
- At least one digit (0-9)
|
| 213 |
+
- At least one special character (!@#$%^&*(),.?\":{}|<>)
|
| 214 |
+
|
| 215 |
+
Example: `SecurePass123!@`
|
| 216 |
+
|
| 217 |
+
## Token Details
|
| 218 |
+
|
| 219 |
+
### Login Token (JWT)
|
| 220 |
+
- **Expiry**: 60 days
|
| 221 |
+
- **Use Case**: Long-lived access token for normal users
|
| 222 |
+
- **Payload**: user_id, email, phone, role
|
| 223 |
+
|
| 224 |
+
### Reset Token (JWT)
|
| 225 |
+
- **Expiry**: 10 minutes
|
| 226 |
+
- **Use Case**: Short-lived token for password reset
|
| 227 |
+
- **Payload**: identifier, purpose
|
| 228 |
+
|
| 229 |
+
## Error Handling
|
| 230 |
+
|
| 231 |
+
All endpoints return structured error responses:
|
| 232 |
+
|
| 233 |
+
```json
|
| 234 |
+
{
|
| 235 |
+
"success": false,
|
| 236 |
+
"message": "Error description",
|
| 237 |
+
"errors": {}
|
| 238 |
+
}
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
Common HTTP Status Codes:
|
| 242 |
+
- `200 OK` - Success
|
| 243 |
+
- `400 Bad Request` - Validation/business logic error
|
| 244 |
+
- `401 Unauthorized` - Invalid/missing token
|
| 245 |
+
- `404 Not Found` - Resource not found
|
| 246 |
+
- `409 Conflict` - Resource already exists
|
| 247 |
+
- `500 Internal Server Error` - Server error
|
| 248 |
+
|
| 249 |
+
## Deployment to Render.com
|
| 250 |
+
|
| 251 |
+
### 1. Push to GitHub
|
| 252 |
+
```bash
|
| 253 |
+
git add .
|
| 254 |
+
git commit -m "Initial commit"
|
| 255 |
+
git push origin main
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
### 2. Deploy on Render
|
| 259 |
+
- Go to https://render.com
|
| 260 |
+
- Click "New +"
|
| 261 |
+
- Select "Web Service"
|
| 262 |
+
- Connect GitHub repository
|
| 263 |
+
- Choose Python runtime
|
| 264 |
+
- Set build command: `pip install -r requirements.txt`
|
| 265 |
+
- Set start command: `uvicorn app.main:app --host 0.0.0.0 --port $PORT`
|
| 266 |
+
|
| 267 |
+
### 3. Add Environment Variables
|
| 268 |
+
Set in Render dashboard:
|
| 269 |
+
```
|
| 270 |
+
ENVIRONMENT=production
|
| 271 |
+
DEBUG=False
|
| 272 |
+
JWT_SECRET=(generate: python -c "import secrets; print(secrets.token_urlsafe(32))")
|
| 273 |
+
MONGODB_URL=<your-mongodb-url>
|
| 274 |
+
RESEND_API_KEY=<your-resend-key>
|
| 275 |
+
RESEND_FROM_EMAIL=noreply@yourdomain.com
|
| 276 |
+
```
|
| 277 |
+
|
| 278 |
+
### 4. Monitor
|
| 279 |
+
- Check deployment logs
|
| 280 |
+
- Test health endpoint: https://your-app.render.com/health
|
| 281 |
+
- View real-time logs in Render dashboard
|
| 282 |
+
|
| 283 |
+
## Testing Endpoints
|
| 284 |
+
|
| 285 |
+
### Using cURL
|
| 286 |
+
|
| 287 |
+
**Signup:**
|
| 288 |
+
```bash
|
| 289 |
+
curl -X POST http://localhost:8000/api/auth/signup \\
|
| 290 |
+
-H "Content-Type: application/json" \\
|
| 291 |
+
-d '{
|
| 292 |
+
"first_name": "John",
|
| 293 |
+
"last_name": "Doe",
|
| 294 |
+
"email": "john@example.com",
|
| 295 |
+
"password": "SecurePass123!@",
|
| 296 |
+
"role": "renter"
|
| 297 |
+
}'
|
| 298 |
+
```
|
| 299 |
+
|
| 300 |
+
**Login:**
|
| 301 |
+
```bash
|
| 302 |
+
curl -X POST http://localhost:8000/api/auth/login \\
|
| 303 |
+
-H "Content-Type: application/json" \\
|
| 304 |
+
-d '{
|
| 305 |
+
"identifier": "john@example.com",
|
| 306 |
+
"password": "SecurePass123!@"
|
| 307 |
+
}'
|
| 308 |
+
```
|
| 309 |
+
|
| 310 |
+
**Get Profile:**
|
| 311 |
+
```bash
|
| 312 |
+
curl -X GET http://localhost:8000/api/auth/profile \\
|
| 313 |
+
-H "Authorization: Bearer <your-jwt-token>"
|
| 314 |
+
```
|
| 315 |
+
|
| 316 |
+
## Security Best Practices
|
| 317 |
+
|
| 318 |
+
✅ Passwords hashed with bcrypt (10 rounds)
|
| 319 |
+
✅ JWT tokens signed with HS256
|
| 320 |
+
✅ Password reset tokens expire in 10 minutes
|
| 321 |
+
✅ OTP expires in 15 minutes
|
| 322 |
+
✅ Max 5 OTP attempts before deletion
|
| 323 |
+
✅ CORS configured for specific origins
|
| 324 |
+
✅ Sensitive data excluded from responses
|
| 325 |
+
✅ Non-root user in Docker
|
| 326 |
+
✅ HTTPS enforced in production
|
| 327 |
+
✅ Environment variables for secrets
|
| 328 |
+
|
| 329 |
+
## Troubleshooting
|
| 330 |
+
|
| 331 |
+
### MongoDB Connection Error
|
| 332 |
+
```
|
| 333 |
+
Error: connect ECONNREFUSED
|
| 334 |
+
```
|
| 335 |
+
- Ensure MONGODB_URL is correct
|
| 336 |
+
- Check MongoDB Atlas network access
|
| 337 |
+
- Verify IP whitelist includes your server
|
| 338 |
+
|
| 339 |
+
### Resend Email Not Sending
|
| 340 |
+
```
|
| 341 |
+
Failed to send email
|
| 342 |
+
```
|
| 343 |
+
- Check RESEND_API_KEY is valid
|
| 344 |
+
- Verify RESEND_FROM_EMAIL is correct
|
| 345 |
+
- Check Resend dashboard for quota limits
|
| 346 |
+
|
| 347 |
+
### Token Validation Error
|
| 348 |
+
```
|
| 349 |
+
Invalid or expired token
|
| 350 |
+
```
|
| 351 |
+
- Ensure Bearer token format: `Authorization: Bearer <token>`
|
| 352 |
+
- Check token hasn't expired (60 days for login)
|
| 353 |
+
- Regenerate token if needed
|
| 354 |
+
|
| 355 |
+
## Performance Tips
|
| 356 |
+
|
| 357 |
+
1. **MongoDB Indexes**: Already created on startup
|
| 358 |
+
2. **Async/Await**: All I/O operations are async
|
| 359 |
+
3. **Connection Pooling**: Motor manages connection pool
|
| 360 |
+
4. **Caching**: Implement Redis for OTP caching (future)
|
| 361 |
+
5. **Rate Limiting**: Add rate limiter middleware (future)
|
| 362 |
+
|
| 363 |
+
## Future Enhancements
|
| 364 |
+
|
| 365 |
+
- [ ] Refresh token rotation
|
| 366 |
+
- [ ] Social login (Google, GitHub)
|
| 367 |
+
- [ ] 2FA support
|
| 368 |
+
- [ ] Account recovery questions
|
| 369 |
+
- [ ] Redis caching layer
|
| 370 |
+
- [ ] Rate limiting middleware
|
| 371 |
+
- [ ] API key authentication
|
| 372 |
+
- [ ] Admin dashboard
|
| 373 |
+
|
| 374 |
+
## License
|
| 375 |
+
|
| 376 |
+
MIT License - see LICENSE file
|
| 377 |
+
|
| 378 |
+
## Support
|
| 379 |
+
|
| 380 |
+
For issues or questions:
|
| 381 |
+
- GitHub Issues: https://github.com/yourusername/lojiz-auth-api/issues
|
| 382 |
+
- Email: support@lojiz.com
|
| 383 |
+
|
| 384 |
+
---
|
| 385 |
+
|
| 386 |
+
**Built with ❤️ using FastAPI, MongoDB, and Resend**
|
| 387 |
+
"""
|
| 388 |
+
|
| 389 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
app.py - Entry point for Hugging Face Spaces deployment
Runs the FastAPI server on port 7860 (HF default)
"""

import sys
import os

# Make repo-root modules importable regardless of the current working directory.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Environment variables must be loaded before the app module is imported,
# since module-level config reads os.environ at import time.
from dotenv import load_dotenv
load_dotenv()

# NOTE(review): this imports a top-level `main` module, but the Dockerfile and
# README reference `app.main:app` — confirm which module actually exposes the app.
from main import app

if __name__ == "__main__":
    import uvicorn

    # HF Spaces listens on 7860 unless PORT overrides it.
    bind_port = int(os.getenv("PORT", 7860))
    bind_host = "0.0.0.0"  # listen on all interfaces

    print(f"🚀 Starting Lojiz Platform + Aida AI on {bind_host}:{bind_port}")

    uvicorn.run(
        app,
        host=bind_host,
        port=bind_port,
        log_level="info",
    )
|
app/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Lojiz Authentication API"""
|
| 2 |
+
|
| 3 |
+
__version__ = "1.0.0"
|
| 4 |
+
__author__ = "Lojiz Team"
|
app/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (273 Bytes). View file
|
|
|
app/__pycache__/config.cpython-312.pyc
ADDED
|
Binary file (5.05 kB). View file
|
|
|
app/__pycache__/database.cpython-312.pyc
ADDED
|
Binary file (4.8 kB). View file
|
|
|
app/__pycache__/main.cpython-312.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
app/ai/__init__.py
ADDED
|
File without changes
|
app/ai/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (175 Bytes). View file
|
|
|
app/ai/__pycache__/config.cpython-312.pyc
ADDED
|
Binary file (2.35 kB). View file
|
|
|
app/ai/__pycache__/graph.cpython-312.pyc
ADDED
|
Binary file (2.72 kB). View file
|
|
|
app/ai/__pycache__/service.cpython-312.pyc
ADDED
|
Binary file (2.54 kB). View file
|
|
|
app/ai/__pycache__/state.cpython-312.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
app/ai/config.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# app/ai/config.py - FIXED SENTRY + ML INTEGRATION
"""
Production-grade clients for:
- DeepSeek (chat completions)
- Redis (session memory / rate-limit)
- Qdrant (vector store)
- Sentry (observability)
- ML Extractor (AI field validation)
"""

import os

import sentry_sdk
import openai
from redis.asyncio import Redis
from qdrant_client import AsyncQdrantClient
from structlog import get_logger

logger = get_logger(__name__)

# ---------- DeepSeek ----------
# Configure the OpenAI SDK to talk to DeepSeek's OpenAI-compatible endpoint.
openai.api_base = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
openai.api_key = os.getenv("DEEPSEEK_API_KEY")

# Fail fast at import time: the service is useless without an API key.
if not openai.api_key:
    raise RuntimeError("DEEPSEEK_API_KEY not set")

# ---------- Redis ----------
REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
redis_client: Redis = Redis.from_url(
    REDIS_URL,
    decode_responses=True,  # return str instead of bytes
    socket_connect_timeout=5,
    socket_timeout=5,
    max_connections=50,
)

# ---------- Qdrant ----------
QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
qdrant_client: AsyncQdrantClient = AsyncQdrantClient(
    url=QDRANT_URL,
    api_key=os.getenv("QDRANT_API_KEY"),
)

# ---------- Sentry - FIXED ----------
# The huggingface_hub auto-integration raised AttributeError, so integrations
# are pinned to an explicit (empty) list and init failures degrade gracefully.
SENTRY_DSN = os.getenv("SENTRY_DSN")
if not SENTRY_DSN:
    logger.info("⚠️ SENTRY_DSN not set - error tracking disabled")
else:
    try:
        sentry_sdk.init(
            dsn=SENTRY_DSN,
            traces_sample_rate=0.2,
            profiles_sample_rate=0.2,
            environment=os.getenv("ENVIRONMENT", "production"),
            integrations=[],  # avoid auto-enabled integrations (huggingface_hub breaks)
        )
        logger.info("✅ Sentry AI observability enabled")
    except Exception as exc:
        # Observability is optional — never let Sentry take the service down.
        logger.warning(f"⚠️ Sentry initialization failed: {exc}")
        logger.warning(" Continuing without Sentry error tracking")
|
app/ai/graph.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# app/ai/graph.py - FIXED NODE NAMING CONFLICT
from langgraph.graph import StateGraph, START, END
from app.ai.state import ChatState
from app.ai.nodes.intent_node import intent_node
from app.ai.nodes.role_gate_node import role_gate_node
from app.ai.nodes.search_node import search_node
from app.ai.nodes.draft_node import draft_node
from app.ai.nodes.publish_node import publish_node
from app.ai.utils.intent_extractor import extract_intent_from_state
from structlog import get_logger

logger = get_logger(__name__)

workflow = StateGraph(ChatState)

# Node names are deliberately distinct from ChatState field names
# (LangGraph rejects a node whose name collides with a state key).
workflow.add_node("parse_intent", intent_node)          # was "intent"
workflow.add_node("check_permissions", role_gate_node)  # was "role_gate"
workflow.add_node("search_listings", search_node)       # was "search"
workflow.add_node("create_draft", draft_node)           # was "draft"
workflow.add_node("handle_publish", publish_node)       # was "publish"

# Every conversation turn begins with intent parsing.
workflow.add_edge(START, "parse_intent")


def route_from_intent(state):
    """Decide the next hop after intent parsing."""
    # Only listing-related intents need a permission check; anything else ends the run.
    if state.get("intent") in ("search", "list"):
        return "check_permissions"
    return END


workflow.add_conditional_edges(
    "parse_intent",
    route_from_intent,
    {"check_permissions": "check_permissions", END: END},
)


def route_from_permissions(state):
    """Pick the operation node once permissions are resolved."""
    if not state.get("allowed", False):
        logger.info("🚫 Request blocked by permission check")
        return END

    current_intent = state.get("intent")
    if current_intent == "search":
        logger.info("🔍 Routing to search")
        return "search_listings"
    if current_intent == "list":
        logger.info("📝 Routing to draft creation")
        return "create_draft"
    return END


workflow.add_conditional_edges(
    "check_permissions",
    route_from_permissions,
    {"search_listings": "search_listings", "create_draft": "create_draft", END: END},
)

# Terminal wiring: search ends the turn; drafting flows into publishing.
workflow.add_edge("search_listings", END)
workflow.add_edge("create_draft", "handle_publish")
workflow.add_edge("handle_publish", END)

# Compile the graph into the runnable agent.
agent = workflow.compile()

logger.info("✅ LangGraph compiled successfully with unique node names")
app/ai/memory/__init__.py
ADDED
|
File without changes
|
app/ai/memory/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (182 Bytes). View file
|
|
|
app/ai/memory/__pycache__/redis_memory.cpython-312.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
app/ai/memory/redis_memory.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# app/ai/memory/redis_memory.py
import json
from typing import List

from app.ai.config import redis_client
from structlog import get_logger

logger = get_logger(__name__)

# Conversation history lives for a week; rate-limit buckets reset each minute.
HISTORY_TTL = 60 * 60 * 24 * 7  # 7 days
RATE_LIMIT_KEY_TTL = 60  # 1 min


# ---------- history ----------
async def load_history(user_id: str) -> List[dict]:
    """Fetch the stored chat history for a user, or [] if none exists."""
    raw = await redis_client.get(f"aida:history:{user_id}")
    return [] if raw is None else json.loads(raw)


async def save_turn(user_id: str, messages: List[dict]) -> None:
    """Persist the full message list for a user, refreshing the TTL."""
    payload = json.dumps(messages, ensure_ascii=False)
    await redis_client.setex(f"aida:history:{user_id}", HISTORY_TTL, payload)


# ---------- rate limit ----------
async def is_rate_limited(user_id: str, limit: int = 30) -> bool:
    """Return True once the user exceeds `limit` calls in the current window."""
    bucket = f"aida:rate:{user_id}"
    count = await redis_client.incr(bucket)
    # First hit in the window creates the bucket, so attach its expiry then.
    if count == 1:
        await redis_client.expire(bucket, RATE_LIMIT_KEY_TTL)
    return count > limit
app/ai/nodes/__init__.py
ADDED
|
File without changes
|
app/ai/nodes/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (181 Bytes). View file
|
|
|
app/ai/nodes/__pycache__/draft_node.cpython-312.pyc
ADDED
|
Binary file (7.53 kB). View file
|
|
|
app/ai/nodes/__pycache__/image_node.cpython-312.pyc
ADDED
|
Binary file (3.53 kB). View file
|
|
|
app/ai/nodes/__pycache__/intent_node.cpython-312.pyc
ADDED
|
Binary file (13.6 kB). View file
|
|
|
app/ai/nodes/__pycache__/publish_node.cpython-312.pyc
ADDED
|
Binary file (4.56 kB). View file
|
|
|
app/ai/nodes/__pycache__/role_gate_node.cpython-312.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|
app/ai/nodes/__pycache__/search_node.cpython-312.pyc
ADDED
|
Binary file (12.5 kB). View file
|
|
|
app/ai/nodes/draft_node.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/draft_node.py - FIXED WITH IMAGES SUPPORT
|
| 2 |
+
import datetime
|
| 3 |
+
from typing import Dict
|
| 4 |
+
from bson import ObjectId
|
| 5 |
+
from app.database import get_db
|
| 6 |
+
from app.ai.state import ListingDraft
|
| 7 |
+
from structlog import get_logger
|
| 8 |
+
from openai import AsyncOpenAI
|
| 9 |
+
from app.config import settings
|
| 10 |
+
from app.ml.models.ml_listing_extractor import get_ml_extractor
|
| 11 |
+
|
| 12 |
+
logger = get_logger(__name__)
|
| 13 |
+
|
| 14 |
+
client = AsyncOpenAI(api_key=settings.OPENAI_API_KEY)
|
| 15 |
+
ml_extractor = get_ml_extractor()
|
| 16 |
+
|
| 17 |
+
# ---------- helpers ----------
|
| 18 |
+
def _generate_title(state: Dict) -> str:
|
| 19 |
+
"""Generate professional title from listing data."""
|
| 20 |
+
bedrooms = state.get("bedrooms", "")
|
| 21 |
+
location = state.get("location", "").title()
|
| 22 |
+
listing_type = state.get("listing_type", "").title()
|
| 23 |
+
|
| 24 |
+
if bedrooms and location:
|
| 25 |
+
return f"{bedrooms}-Bedroom {listing_type} in {location}"
|
| 26 |
+
return f"Property in {location}"
|
| 27 |
+
|
| 28 |
+
def _generate_description(state: Dict) -> str:
|
| 29 |
+
"""Generate professional description from listing data."""
|
| 30 |
+
bedrooms = state.get("bedrooms", "")
|
| 31 |
+
bathrooms = state.get("bathrooms", "")
|
| 32 |
+
location = state.get("location", "").title()
|
| 33 |
+
amenities = state.get("amenities", [])
|
| 34 |
+
price = state.get("price", "")
|
| 35 |
+
price_type = state.get("price_type", "").title()
|
| 36 |
+
listing_type = state.get("listing_type", "").title()
|
| 37 |
+
|
| 38 |
+
desc = f"Beautiful {bedrooms}-bedroom, {bathrooms}-bathroom {listing_type} "
|
| 39 |
+
desc += f"located in {location}. "
|
| 40 |
+
|
| 41 |
+
if price:
|
| 42 |
+
desc += f"Priced at {price:,} {price_type}. "
|
| 43 |
+
|
| 44 |
+
if amenities:
|
| 45 |
+
amenities_str = ", ".join(amenities)
|
| 46 |
+
desc += f"Amenities include: {amenities_str}. "
|
| 47 |
+
|
| 48 |
+
desc += "Perfect for comfortable living in a convenient location."
|
| 49 |
+
return desc
|
| 50 |
+
|
| 51 |
+
# ---------- node ----------
|
| 52 |
+
async def draft_node(state: Dict) -> Dict:
    """
    LangGraph node: build a listing preview once every field is collected.

    - Runs only when intent == "list" and status == "draft_ready".
    - ML-validates all fields; on failure drops back to "collecting" and
      repopulates `missing_fields` so the collection loop can re-ask.
    - Generates a title + description and stores `draft_preview` in state.
    - Does NOT save to MongoDB yet -- publish_node persists on confirmation.

    FIX: the preview used ``f"{price:,}"``, which raises ValueError when the
    price is a non-numeric value; formatting is now defensive.
    """
    # Only process if listing creation with all fields ready
    if state.get("intent") != "list" or state.get("status") != "draft_ready":
        return state

    user_id = state.get("user_id")

    # ML validation before drafting
    try:
        validation = ml_extractor.validate_all_fields(state, user_id)

        if not validation["all_valid"]:
            # Fields failed validation: go back to collecting.
            issues_text = "\n".join(f"❌ {issue}" for issue in validation["issues"])
            state["ai_reply"] = f"""I found some issues with your listing:

{issues_text}

Let me ask again - could you clarify these fields?"""

            state["status"] = "collecting"
            # Re-populate missing fields based on validation results.
            state["missing_fields"] = [
                field for field, result in validation["field_validations"].items()
                if not result["is_valid"]
            ]

            logger.warning("🚫 Fields failed ML validation", issues=validation["issues"])
            return state

        logger.info("✅ All fields passed ML validation", user_id=user_id)

    except Exception as e:
        logger.error("❌ ML validation error", exc_info=e)
        state["ai_reply"] = "Sorry, I couldn't validate your listing. Please try again."
        state["status"] = "error"
        return state

    # Generate title and description
    title = _generate_title(state)
    description = _generate_description(state)

    # Images uploaded earlier by image_node live on the draft (dict form).
    draft = state.get("draft")
    images = draft.get("images", []) if isinstance(draft, dict) else []

    # Build draft preview with all fields including images.
    draft_preview = {
        "title": title,
        "description": description,
        "location": (state.get("location") or "").title(),
        "bedrooms": state.get("bedrooms"),
        "bathrooms": state.get("bathrooms"),
        "price": state.get("price"),
        "price_type": state.get("price_type"),
        "listing_type": state.get("listing_type"),
        "amenities": state.get("amenities", []),
        "requirements": state.get("requirements"),
        "currency": state.get("currency", "XOF"),
        "images": images,
        "field_confidences": validation["field_validations"],
    }

    logger.info("🎯 Draft preview generated",
                title=title,
                location=state.get("location"),
                image_count=len(images))

    # Build nice preview message for the user.
    images_section = ""
    if images:
        images_section = f"\n🖼️ Images: {len(images)} uploaded\n"
        for idx, img_url in enumerate(images[:3], 1):
            images_section += f"  {idx}. {img_url[:60]}...\n"
        if len(images) > 3:
            images_section += f"  ... and {len(images) - 3} more\n"

    # FIX: price may be a string from the LLM; apply ',' only to numbers.
    price = draft_preview["price"]
    price_str = f"{price:,}" if isinstance(price, (int, float)) else str(price)

    preview_text = f"""
┌─────────────────────────────────────────────────────────────────
🏠 LISTING PREVIEW
└─────────────────────────────────────────────────────────────────
**{draft_preview['title']}**

📍 Location: {draft_preview['location']}
🛏️ Bedrooms: {draft_preview['bedrooms']}
🚿 Bathrooms: {draft_preview['bathrooms']}
💰 Price: {price_str} {draft_preview['price_type']} ({draft_preview['currency']})

{draft_preview['description']}

✨ Amenities: {', '.join(draft_preview['amenities']) if draft_preview['amenities'] else 'None specified'}
{images_section}
└─────────────────────────────────────────────────────────────────

👉 Say **publish** or click the publish button to make it live!
"""

    state["draft_preview"] = draft_preview
    state["ai_reply"] = preview_text
    state["status"] = "preview_shown"  # Waiting for publish confirmation
    return state
|
app/ai/nodes/image_node.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/image_node.py
|
| 2 |
+
import base64
|
| 3 |
+
import mimetypes
|
| 4 |
+
from typing import Dict
|
| 5 |
+
import httpx
|
| 6 |
+
from app.config import settings
|
| 7 |
+
from structlog import get_logger
|
| 8 |
+
|
| 9 |
+
logger = get_logger(__name__)
|
| 10 |
+
CF_UPLOAD_URL = f"https://api.cloudflare.com/client/v4/accounts/{settings.CF_ACCOUNT_ID}/images/v1"
|
| 11 |
+
|
| 12 |
+
async def image_node(state: Dict) -> Dict:
    """
    LangGraph node: upload a user-supplied image to Cloudflare Images.

    Expects ``state["image"] = {"mime": "image/jpeg", "data": "<base64>"}``.
    On success, appends the public delivery URL to the draft's image list
    and confirms to the user.

    FIXES:
    - ``image["mime"]`` / ``image["data"]`` could raise KeyError on partial
      payloads; now handled with defaults / an error reply.
    - The draft may be a plain dict (draft_node reads ``draft.get("images")``);
      ``draft.images = []`` on a dict raised AttributeError. Both shapes are
      now supported.
    - imagedelivery.net URLs require the *account hash* as the first path
      segment; building one from the image id alone yields a broken link.
      Prefer the delivery URLs Cloudflare returns in ``result.variants``.
    """
    image = state.get("image")
    if not image:
        return state

    # Be tolerant of partial payloads instead of raising KeyError.
    mime_type = image.get("mime") or "image/jpeg"
    base64_data = image.get("data")
    if not base64_data:
        state["ai_reply"] = "❌ Invalid image data. Please try again."
        return state

    ext = mimetypes.guess_extension(mime_type) or ".jpg"
    filename = f"upload{ext}"

    try:
        binary = base64.b64decode(base64_data)
    except Exception as e:
        logger.warning("Bad base64 image", exc_info=e)
        state["ai_reply"] = "❌ Invalid image data. Please try again."
        return state

    headers = {"Authorization": f"Bearer {settings.CF_API_TOKEN}"}
    files = {"file": (filename, binary, mime_type)}

    async with httpx.AsyncClient() as client:
        try:
            resp = await client.post(CF_UPLOAD_URL, files=files, headers=headers, timeout=30)
            resp.raise_for_status()
            data = resp.json()
            if not data.get("success"):
                raise ValueError(data.get("errors"))
            result = data["result"]
            # Prefer Cloudflare's own delivery URLs; the constructed URL is a
            # best-effort fallback only.
            variants = result.get("variants") or []
            public_url = variants[0] if variants else f"https://imagedelivery.net/{result['id']}/public"
        except Exception as e:
            logger.error("Cloudflare upload failed", exc_info=e)
            state["ai_reply"] = "❌ Could not upload image. Please try again."
            return state

    # Append URL to the draft; dict drafts come from the chat state,
    # object drafts from ListingDraft -- support both.
    draft = state.get("draft")
    if isinstance(draft, dict):
        draft.setdefault("images", []).append(public_url)
        state["ai_reply"] = f"📸 Picture uploaded! Add more or say **publish** when ready.\n{public_url}"
    elif draft is not None:
        if getattr(draft, "images", None) is None:
            draft.images = []
        draft.images.append(public_url)
        state["ai_reply"] = f"📸 Picture uploaded! Add more or say **publish** when ready.\n{public_url}"
    else:
        state["ai_reply"] = "📸 Picture uploaded! Continue describing the property."

    logger.info("Image uploaded to Cloudflare", url=public_url)
    return state
|
app/ai/nodes/intent_node.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/intent_node.py - FIXED WITH ERROR HANDLING
|
| 2 |
+
import json
|
| 3 |
+
import re
|
| 4 |
+
from typing import Dict, List
|
| 5 |
+
from tenacity import retry, stop_after_attempt, wait_exponential
|
| 6 |
+
from openai import AsyncOpenAI
|
| 7 |
+
from structlog import get_logger
|
| 8 |
+
from app.config import settings
|
| 9 |
+
from app.ml.models.ml_listing_extractor import get_ml_extractor
|
| 10 |
+
|
| 11 |
+
logger = get_logger(__name__)
|
| 12 |
+
|
| 13 |
+
MODEL = "deepseek-chat"
|
| 14 |
+
MAX_TOKENS = 600
|
| 15 |
+
TEMP = 0
|
| 16 |
+
|
| 17 |
+
client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY,
|
| 18 |
+
base_url=settings.DEEPSEEK_BASE_URL)
|
| 19 |
+
|
| 20 |
+
ml_extractor = get_ml_extractor()
|
| 21 |
+
|
| 22 |
+
def _load_system() -> str:
    """Read the Aida system prompt template from disk (UTF-8)."""
    prompt_path = "app/ai/prompts/system_prompt.txt"
    with open(prompt_path, encoding="utf-8") as prompt_file:
        return prompt_file.read()

# Loaded once at import time; the template contains a {user_role} placeholder.
SYSTEM_PROMPT = _load_system()
|
| 27 |
+
|
| 28 |
+
def _clean_json(raw: str) -> str:
|
| 29 |
+
"""Remove markdown code blocks from JSON string."""
|
| 30 |
+
cleaned = re.sub(r'```json\s*', '', raw)
|
| 31 |
+
cleaned = re.sub(r'```\s*', '', cleaned)
|
| 32 |
+
return cleaned.strip()
|
| 33 |
+
|
| 34 |
+
def _normalize_locations(location: str) -> str:
|
| 35 |
+
"""Normalize location names (handle typos)."""
|
| 36 |
+
if not location:
|
| 37 |
+
return None
|
| 38 |
+
|
| 39 |
+
loc_lower = location.lower().strip()
|
| 40 |
+
|
| 41 |
+
location_map = {
|
| 42 |
+
"lago": "lagos",
|
| 43 |
+
"lgs": "lagos",
|
| 44 |
+
"lag": "lagos",
|
| 45 |
+
"cotnu": "cotonou",
|
| 46 |
+
"cotonus": "cotonou",
|
| 47 |
+
"cotou": "cotonou",
|
| 48 |
+
"nairobi": "nairobi",
|
| 49 |
+
"nbi": "nairobi",
|
| 50 |
+
"accra": "accra",
|
| 51 |
+
"acc": "accra",
|
| 52 |
+
"joburg": "johannesburg",
|
| 53 |
+
"jozi": "johannesburg",
|
| 54 |
+
"london": "london",
|
| 55 |
+
"paris": "paris",
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
return location_map.get(loc_lower, location.lower())
|
| 59 |
+
|
| 60 |
+
def _normalize_amenities(amenities: list) -> list:
|
| 61 |
+
"""Normalize amenity names (handle typos)."""
|
| 62 |
+
if not amenities:
|
| 63 |
+
return []
|
| 64 |
+
|
| 65 |
+
amenity_map = {
|
| 66 |
+
"balcno": "balcony",
|
| 67 |
+
"balconny": "balcony",
|
| 68 |
+
"parkng": "parking",
|
| 69 |
+
"park": "parking",
|
| 70 |
+
"wifi": "wifi",
|
| 71 |
+
"furnisd": "furnished",
|
| 72 |
+
"furnishd": "furnished",
|
| 73 |
+
"furnish": "furnished",
|
| 74 |
+
"ac": "air conditioning",
|
| 75 |
+
"air cond": "air conditioning",
|
| 76 |
+
"aircond": "air conditioning",
|
| 77 |
+
"gym": "gym",
|
| 78 |
+
"pool": "pool",
|
| 79 |
+
"swiming": "pool",
|
| 80 |
+
"kitchen": "kitchen",
|
| 81 |
+
"kitchn": "kitchen",
|
| 82 |
+
"garden": "garden",
|
| 83 |
+
"gdn": "garden",
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
normalized = []
|
| 87 |
+
for amenity in amenities:
|
| 88 |
+
if not amenity:
|
| 89 |
+
continue
|
| 90 |
+
am_lower = amenity.lower().strip()
|
| 91 |
+
normalized_am = amenity_map.get(am_lower, am_lower)
|
| 92 |
+
if normalized_am and normalized_am not in normalized:
|
| 93 |
+
normalized.append(normalized_am)
|
| 94 |
+
|
| 95 |
+
return normalized
|
| 96 |
+
|
| 97 |
+
def _normalize_price_type(price_type: str) -> str:
|
| 98 |
+
"""Normalize price_type (handle typos)."""
|
| 99 |
+
if not price_type:
|
| 100 |
+
return None
|
| 101 |
+
|
| 102 |
+
pt_lower = price_type.lower().strip()
|
| 103 |
+
|
| 104 |
+
price_type_map = {
|
| 105 |
+
"montly": "monthly",
|
| 106 |
+
"monthyl": "monthly",
|
| 107 |
+
"mth": "monthly",
|
| 108 |
+
"month": "monthly",
|
| 109 |
+
"nightl": "nightly",
|
| 110 |
+
"nightly": "nightly",
|
| 111 |
+
"night": "nightly",
|
| 112 |
+
"daily": "daily",
|
| 113 |
+
"day": "daily",
|
| 114 |
+
"weakly": "weekly",
|
| 115 |
+
"weakyl": "weekly",
|
| 116 |
+
"week": "weekly",
|
| 117 |
+
"yr": "yearly",
|
| 118 |
+
"year": "yearly",
|
| 119 |
+
"yearly": "yearly",
|
| 120 |
+
"annum": "yearly",
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
return price_type_map.get(pt_lower, pt_lower)
|
| 124 |
+
|
| 125 |
+
def _normalize_listing_type(listing_type: str) -> str:
|
| 126 |
+
"""Normalize listing_type (handle typos)."""
|
| 127 |
+
if not listing_type:
|
| 128 |
+
return None
|
| 129 |
+
|
| 130 |
+
lt_lower = listing_type.lower().strip()
|
| 131 |
+
|
| 132 |
+
listing_type_map = {
|
| 133 |
+
"rent": "rent",
|
| 134 |
+
"for rent": "rent",
|
| 135 |
+
"rental": "rent",
|
| 136 |
+
"short stay": "short-stay",
|
| 137 |
+
"short-stay": "short-stay",
|
| 138 |
+
"shortsta": "short-stay",
|
| 139 |
+
"short stya": "short-stay",
|
| 140 |
+
"stayover": "short-stay",
|
| 141 |
+
"roommate": "roommate",
|
| 142 |
+
"roommat": "roommate",
|
| 143 |
+
"sharing": "roommate",
|
| 144 |
+
"flatmate": "roommate",
|
| 145 |
+
"shareflat": "roommate",
|
| 146 |
+
"sale": "sale",
|
| 147 |
+
"for sale": "sale",
|
| 148 |
+
"selling": "sale",
|
| 149 |
+
"sell": "sale",
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
return listing_type_map.get(lt_lower, lt_lower)
|
| 153 |
+
|
| 154 |
+
def _get_missing_fields(data: Dict) -> List[str]:
|
| 155 |
+
"""Identify which REQUIRED fields are missing for listing creation."""
|
| 156 |
+
if data.get("intent") != "list":
|
| 157 |
+
return []
|
| 158 |
+
|
| 159 |
+
required = ["location", "bedrooms", "bathrooms", "price", "listing_type", "price_type"]
|
| 160 |
+
missing = []
|
| 161 |
+
|
| 162 |
+
for field in required:
|
| 163 |
+
value = data.get(field)
|
| 164 |
+
if value is None or value == "" or value == 0:
|
| 165 |
+
missing.append(field)
|
| 166 |
+
|
| 167 |
+
return missing
|
| 168 |
+
|
| 169 |
+
def _get_next_question(missing_fields: List[str]) -> str:
|
| 170 |
+
"""Get the next question based on missing fields."""
|
| 171 |
+
if not missing_fields:
|
| 172 |
+
return None
|
| 173 |
+
|
| 174 |
+
next_field = missing_fields[0]
|
| 175 |
+
|
| 176 |
+
questions = {
|
| 177 |
+
"location": "Where is the property located? (city/area)",
|
| 178 |
+
"bedrooms": "How many bedrooms?",
|
| 179 |
+
"bathrooms": "How many bathrooms?",
|
| 180 |
+
"price": "What's the price/rent amount?",
|
| 181 |
+
"listing_type": "Is it for rent, short-stay, sale, or roommate?",
|
| 182 |
+
"price_type": "Is it monthly, nightly, or yearly?"
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
return questions.get(next_field, "What else should I know?")
|
| 186 |
+
|
| 187 |
+
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
async def intent_node(state: Dict) -> Dict:
    """
    LangGraph node: classify the user's message with the DeepSeek LLM and
    extract listing/search fields into state.

    Flow:
    1. Prompt the model with the role-specific system prompt.
    2. Parse its JSON reply (markdown fences stripped by _clean_json);
       on bad JSON, fall back to a "please rephrase" denial.
    3. For "list" intents: progressive field collection, typo
       normalization, smart inference (city / listing_type / currency via
       ml_extractor) and per-field ML validation -- each inference step is
       individually fault-tolerant.
    4. Copy everything into `state` for downstream nodes.

    The @retry decorator re-runs the whole node (including the LLM call)
    up to 3 times with exponential backoff on any raised exception.
    """
    user_role = state["user_role"]
    user_id = state.get("user_id")
    # Last message in the conversation is the one being classified.
    human_msg = state["messages"][-1]["content"]

    prompt = SYSTEM_PROMPT.replace("{user_role}", user_role)
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": human_msg},
    ]

    logger.info("🤖 Aida intent call", user_role=user_role, msg=human_msg)
    resp = await client.chat.completions.create(
        model=MODEL,
        messages=messages,
        temperature=TEMP,
        max_tokens=MAX_TOKENS,
    )
    raw = resp.choices[0].message.content.strip()
    logger.debug("🤖 Aida raw response", raw=raw)

    # Parse the model's JSON; a parse failure becomes a polite denial
    # rather than an exception (so @retry does not re-bill the LLM call).
    try:
        cleaned = _clean_json(raw)
        data = json.loads(cleaned)
    except json.JSONDecodeError as e:
        logger.error("❌ Aida bad json", raw=raw, exc_info=e)
        data = {
            "allowed": False,
            "ai_reply": "Sorry, I didn't understand that. Could you rephrase?",
        }

    # Handle LISTING INTENT with progressive collection
    if data.get("intent") == "list":
        data["allowed"] = True
        missing = _get_missing_fields(data)

        if missing:
            data["status"] = "collecting"
            data["missing_fields"] = missing
            data["next_question"] = _get_next_question(missing)
        else:
            data["status"] = "draft_ready"
            data["missing_fields"] = []

    # SEARCH is always allowed (role_gate_node will check)
    if data.get("intent") == "search":
        data["allowed"] = True

    # Normalize values (typo/synonym maps defined above).
    location = _normalize_locations(data.get("location"))
    amenities = _normalize_amenities(data.get("amenities", []))
    price_type = _normalize_price_type(data.get("price_type"))
    listing_type = _normalize_listing_type(data.get("listing_type"))

    # ✅ SMART INFERENCE + ML VALIDATION with ERROR HANDLING
    if data.get("intent") == "list":
        location_input = data.get("location")

        # 1️⃣ Extract city from detailed address using Nominatim; failure
        # keeps the original location.
        if location_input:
            try:
                city, location_info = await ml_extractor.extract_location_from_address(location_input)
                if city:
                    data["location"] = city
                    data["location_details"] = location_info
                    logger.info(f"✅ Extracted city from address: {location_input} → {city}")
            except Exception as e:
                logger.warning(f"⚠️ Failed to extract location: {e}")
                # Continue with original location
                pass

        # 2️⃣ Infer listing_type from user role + message keywords; on
        # failure the user will simply be asked for it.
        try:
            listing_type, listing_confidence = ml_extractor.infer_listing_type(
                data,
                user_role=user_role,
                user_message=human_msg
            )
            if listing_type:
                data["listing_type"] = listing_type
                data["listing_confidence"] = listing_confidence
                logger.info(f"✅ Inferred listing_type: {listing_type} (confidence: {listing_confidence})")
        except Exception as e:
            logger.warning(f"⚠️ Failed to infer listing_type: {e}")
            # Will ask user for listing_type
            pass

        # 3️⃣ Infer currency from location (Nominatim + REST Countries);
        # failure falls back to XOF.
        try:
            currency, extracted_city, currency_confidence = await ml_extractor.infer_currency(data)
            if currency:
                data["currency"] = currency
                data["currency_confidence"] = currency_confidence
                if extracted_city:
                    data["location"] = extracted_city
                logger.info(f"✅ Inferred currency: {currency} (confidence: {currency_confidence})")
        except Exception as e:
            logger.warning(f"⚠️ Failed to infer currency: {e}")
            # Fallback to default
            data["currency"] = data.get("currency", "XOF")

        # 4️⃣ ML VALIDATION of extracted fields; results are surfaced in
        # the reply and stored for draft_node's field_confidences.
        try:
            validation_issues = []
            validation_suggestions = []
            field_validations = {}

            for field in ["location", "bedrooms", "bathrooms", "price", "price_type"]:
                value = data.get(field)
                if value is not None:
                    result = ml_extractor.validate_field(field, value, human_msg, user_id)
                    field_validations[field] = result  # ✅ STORE

                    if not result["is_valid"]:
                        validation_issues.append(f"❌ {field}: {result['suggestion']}")
                        logger.warning(f"Validation failed for {field}", suggestion=result["suggestion"])
                    elif result["suggestion"]:
                        validation_suggestions.append(f"⚠️ {field}: {result['suggestion']}")

            # ✅ Store validation results in state
            data["field_validations"] = field_validations
            data["validation_suggestions"] = validation_suggestions

            # If validation issues, add them to the AI reply
            if validation_issues:
                current_reply = data.get("ai_reply", "")
                data["ai_reply"] = current_reply + "\n\n" + "\n".join(validation_issues)
                logger.info("ℹ️ ML validation issues found", issues=validation_issues)

        except Exception as e:
            logger.warning(f"⚠️ Failed to validate fields: {e}")
            # Continue without validation results
            pass

    # ✅ Store intent in ai_reply as JSON for routing
    # LangGraph will extract it from the response
    intent_value = data.get("intent")

    # Update state with all fields.
    # NOTE(review): state receives the pre-inference normalized `location`
    # local; the Nominatim-extracted city written into data["location"]
    # above is NOT propagated here, while `listing_type` IS (its local is
    # reassigned by step 2). Confirm this asymmetry is intentional.
    state.update(
        allowed=data.get("allowed", False),
        status=data.get("status"),
        missing_fields=data.get("missing_fields", []),
        next_question=data.get("next_question"),

        # Listing fields
        listing_type=listing_type,
        location=location,
        bedrooms=data.get("bedrooms"),
        bathrooms=data.get("bathrooms"),
        price=data.get("price"),
        price_type=price_type,
        amenities=amenities,
        requirements=data.get("requirements"),

        # Search fields
        min_price=data.get("min_price"),
        max_price=data.get("max_price"),

        # ML fields ✅ STORE
        field_validations=data.get("field_validations"),
        listing_confidence=data.get("listing_confidence"),
        currency_confidence=data.get("currency_confidence"),
        location_details=data.get("location_details"),
        validation_suggestions=data.get("validation_suggestions"),

        # Other
        currency=data.get("currency", "XOF"),
        ai_reply=data.get("ai_reply", ""),
        draft_preview=data.get("draft_preview"),
    )

    logger.info("📄 Intent node processed",
                intent=intent_value,
                status=state.get("status"),
                missing_fields=state.get("missing_fields"),
                location=state.get("location"))

    return state
|
app/ai/nodes/publish_node.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/publish_node.py - FIXED WITH IMAGES IN MONGODB
|
| 2 |
+
import datetime
|
| 3 |
+
from typing import Dict
|
| 4 |
+
from app.database import get_db
|
| 5 |
+
from app.ai.state import ListingDraft
|
| 6 |
+
from structlog import get_logger
|
| 7 |
+
from bson import ObjectId
|
| 8 |
+
|
| 9 |
+
logger = get_logger(__name__)
|
| 10 |
+
|
| 11 |
+
# ---------- intent keywords ----------
|
| 12 |
+
PUBLISH_WORDS = {"publish", "go live", "post it", "list it", "confirm", "yes", "ok", "okay"}
|
| 13 |
+
|
| 14 |
+
# ---------- node ----------
|
| 15 |
+
async def publish_node(state: Dict) -> Dict:
|
| 16 |
+
"""
|
| 17 |
+
LangGraph node:
|
| 18 |
+
- If user says "publish" → save draft to MongoDB as PUBLISHED
|
| 19 |
+
- Set status to "published"
|
| 20 |
+
- Return success message with MongoDB ID
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
# Only process if we have a draft preview waiting
|
| 24 |
+
if state.get("status") != "preview_shown" or not state.get("draft_preview"):
|
| 25 |
+
return state
|
| 26 |
+
|
| 27 |
+
last_text = state["messages"][-1]["content"].lower()
|
| 28 |
+
|
| 29 |
+
# Check if user confirmed publish
|
| 30 |
+
if not any(word in last_text for word in PUBLISH_WORDS):
|
| 31 |
+
# User didn't confirm, stay in preview_shown state
|
| 32 |
+
state["ai_reply"] = "Waiting for your confirmation. Say **publish** when ready!"
|
| 33 |
+
return state
|
| 34 |
+
|
| 35 |
+
# User confirmed! Save to MongoDB
|
| 36 |
+
draft_preview = state["draft_preview"]
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
db = await get_db()
|
| 40 |
+
col = db["listings"]
|
| 41 |
+
|
| 42 |
+
# ✅ Build listing data with all fields including images
|
| 43 |
+
listing_data = {
|
| 44 |
+
"owner_id": state["user_id"],
|
| 45 |
+
"type": draft_preview.get("listing_type"),
|
| 46 |
+
"title": draft_preview.get("title"),
|
| 47 |
+
"description": draft_preview.get("description"),
|
| 48 |
+
"price": draft_preview.get("price"),
|
| 49 |
+
"price_type": draft_preview.get("price_type"),
|
| 50 |
+
"bedrooms": draft_preview.get("bedrooms"),
|
| 51 |
+
"bathrooms": draft_preview.get("bathrooms"),
|
| 52 |
+
"location": draft_preview.get("location"),
|
| 53 |
+
"amenities": draft_preview.get("amenities", []),
|
| 54 |
+
"requirements": draft_preview.get("requirements"),
|
| 55 |
+
"currency": draft_preview.get("currency", "XOF"),
|
| 56 |
+
"images": draft_preview.get("images", []), # ✅ INCLUDE IMAGES
|
| 57 |
+
"status": "published",
|
| 58 |
+
"created_at": datetime.datetime.utcnow(),
|
| 59 |
+
"updated_at": datetime.datetime.utcnow(),
|
| 60 |
+
"views": 0, # Initialize view counter
|
| 61 |
+
"favorites": 0, # Initialize favorites counter
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
result = await col.insert_one(listing_data)
|
| 65 |
+
mongo_id = str(result.inserted_id)
|
| 66 |
+
|
| 67 |
+
logger.info("✅ Listing published to MongoDB",
|
| 68 |
+
mongo_id=mongo_id,
|
| 69 |
+
title=draft_preview.get("title"),
|
| 70 |
+
location=draft_preview.get("location"),
|
| 71 |
+
image_count=len(draft_preview.get("images", [])))
|
| 72 |
+
|
| 73 |
+
# Build success message with image info
|
| 74 |
+
image_info = ""
|
| 75 |
+
if draft_preview.get("images"):
|
| 76 |
+
image_info = f"\n🖼️ With {len(draft_preview['images'])} image(s)"
|
| 77 |
+
|
| 78 |
+
state["ai_reply"] = f"""
|
| 79 |
+
✅ **Listing Live!**
|
| 80 |
+
|
| 81 |
+
Your property is now published and visible to renters/buyers:
|
| 82 |
+
- **Title:** {draft_preview.get('title')}
|
| 83 |
+
- **Location:** {draft_preview.get('location')}
|
| 84 |
+
- **Price:** {draft_preview.get('price'):,} {draft_preview.get('price_type')}{image_info}
|
| 85 |
+
|
| 86 |
+
👉 Users can now find your listing when they search!
|
| 87 |
+
|
| 88 |
+
Want to list another property? Just say "list a property"
|
| 89 |
+
"""
|
| 90 |
+
state["status"] = "published"
|
| 91 |
+
state["mongo_id"] = mongo_id
|
| 92 |
+
|
| 93 |
+
except Exception as e:
|
| 94 |
+
logger.error("❌ Failed to publish listing", exc_info=e)
|
| 95 |
+
state["ai_reply"] = "Sorry, I couldn't publish your listing. Please try again."
|
| 96 |
+
state["status"] = "error"
|
| 97 |
+
|
| 98 |
+
return state
|
app/ai/nodes/role_gate_node.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/role_gate_node.py (final)
|
| 2 |
+
from typing import Dict
|
| 3 |
+
from structlog import get_logger
|
| 4 |
+
|
| 5 |
+
logger = get_logger(__name__)
|
| 6 |
+
|
| 7 |
+
# FIX: intent_node's _normalize_listing_type emits "short-stay" (hyphen),
# so the underscore-only spelling made landlords unable to list short-stays.
# Accept both spellings for backward compatibility.
LANDLORD_ALLOWED = {"rent", "short_stay", "short-stay", "sale"}
RENTER_ALLOWED = {"roommate"}
|
| 9 |
+
|
| 10 |
+
def role_gate_node(state: Dict) -> Dict:
    """
    LangGraph node enforcing which listing types a user role may create.

    Input:  state with allowed, listing_type, user_role, intent
    Output: same state (may flip ``allowed`` to False and set ``ai_reply``)
    """
    # 1. Search intent → skip role check entirely.
    if state.get("intent") == "search":
        logger.info("Aida passed search intent")
        return state

    # 2. Already blocked by the intent node? Keep its message.
    if not state.get("allowed", False):
        logger.info("Aida already denied", reason="intent")
        return state

    # 3. Casual chat (no listing_type) → leave reply untouched.
    list_type = state.get("listing_type")
    if list_type is None:
        logger.info("Aida passed casual chat")
        return state

    # Bug fix: the LLM prompt instructs the model to emit "short-stay"
    # (hyphen) while the allow-sets use "short_stay" (underscore), which
    # wrongly denied valid landlord short-stay listings. Normalize the
    # spelling before the membership check.
    normalized = str(list_type).lower().replace("-", "_")

    # 4. Real-estate listing → role check.
    allowed_set = LANDLORD_ALLOWED if state["user_role"] == "landlord" else RENTER_ALLOWED
    if normalized not in allowed_set:
        state["allowed"] = False
        if state["user_role"] == "landlord":
            state["ai_reply"] = (
                "As a landlord you can only list for rent, short-stay or sale. "
                "Would you like to list this as a rental instead?"
            )
        else:
            state["ai_reply"] = (
                "As a renter you can only list for roommate matching. "
                "If you want to share your own space, great! Otherwise you can search listings."
            )
        logger.info("Aida denied by role gate", user_role=state["user_role"], type=list_type)
    else:
        state["ai_reply"] = state.get("ai_reply") or "Got it! Let me draft that for you."
        logger.info("Aida approved", user_role=state["user_role"], type=list_type)

    return state
|
app/ai/nodes/search_node.py
ADDED
|
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/nodes/search_node.py (comprehensive search - all fields)
|
| 2 |
+
import json
|
| 3 |
+
import httpx
|
| 4 |
+
from typing import Dict, List
|
| 5 |
+
from qdrant_client import AsyncQdrantClient, models
|
| 6 |
+
from app.config import settings
|
| 7 |
+
from structlog import get_logger
|
| 8 |
+
|
| 9 |
+
logger = get_logger(__name__)
|
| 10 |
+
|
| 11 |
+
# Embedding model served via OpenRouter — must match the model used when the
# listings were indexed, or query vectors will live in a different space.
EMBED_MODEL = "qwen/qwen3-embedding-8b"
# Maximum number of hits returned per search / suggestion attempt.
TOP_K = 6

# ------------------------------------------------------------------
# Qdrant client
# ------------------------------------------------------------------
# Module-level async client, shared across all requests.
qdrant_client = AsyncQdrantClient(
    url=settings.QDRANT_URL,
    api_key=settings.QDRANT_API_KEY,
    https=True,
    timeout=60,
)
|
| 23 |
+
|
| 24 |
+
# ---------- helpers ----------
|
| 25 |
+
def _build_filter(state: Dict) -> models.Filter:
    """Build a comprehensive Qdrant filter from ALL search fields in *state*.

    Every condition is optional: only fields present in the state add a
    ``must`` clause. Returns an empty Filter when nothing is constrained
    (pure semantic search).
    """
    must = []

    # Location filter (case-insensitive; payload stores a lowercased copy).
    loc = (state.get("location") or "").lower()
    if loc:
        must.append(
            models.FieldCondition(
                key="location_lower",
                match=models.MatchValue(value=loc)
            )
        )

    # Price range (min/max combined into a single range condition).
    # Reuses the same helper as the relaxed-suggestion search so both
    # code paths build identical price clauses instead of duplicating it.
    _add_price_range(must, state)

    # Bedrooms filter
    if state.get("bedrooms") is not None:
        must.append(
            models.FieldCondition(
                key="bedrooms",
                match=models.MatchValue(value=state["bedrooms"])
            )
        )

    # Bathrooms filter
    if state.get("bathrooms") is not None:
        must.append(
            models.FieldCondition(
                key="bathrooms",
                match=models.MatchValue(value=state["bathrooms"])
            )
        )

    # Price type filter (monthly, nightly, yearly, etc.)
    price_type = (state.get("price_type") or "").lower()
    if price_type:
        must.append(
            models.FieldCondition(
                key="price_type_lower",
                match=models.MatchValue(value=price_type)
            )
        )

    # Listing type filter (rent, short_stay, roommate, sale)
    listing_type = (state.get("listing_type") or "").lower()
    if listing_type:
        must.append(
            models.FieldCondition(
                key="listing_type_lower",
                match=models.MatchValue(value=listing_type)
            )
        )

    # Amenities: every mentioned amenity must exist on the listing.
    # ``or []`` guards against an explicit None in the state, which
    # previously crashed the loop.
    amenities = state.get("amenities") or []
    for amenity in amenities:
        amenity_lower = amenity.lower().strip()
        if amenity_lower:
            must.append(
                models.FieldCondition(
                    key="amenities",
                    match=models.MatchValue(value=amenity_lower)
                )
            )

    filt = models.Filter(must=must) if must else models.Filter()
    logger.info("🔍 Filter built", must_conditions=len(must), location=loc,
                min_price=state.get("min_price"), max_price=state.get("max_price"),
                bedrooms=state.get("bedrooms"), bathrooms=state.get("bathrooms"),
                amenities=amenities, price_type=price_type, listing_type=listing_type)
    return filt
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
async def _embed(text: str) -> List[float]:
    """Return the embedding vector for *text* from the OpenRouter API (async)."""
    request_headers = {
        "Authorization": f"Bearer {settings.OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
        "HTTP-Referer": "",
        "X-Title": "",
    }
    request_body = {
        "model": EMBED_MODEL,
        "input": text,
        "encoding_format": "float",
    }
    async with httpx.AsyncClient(timeout=60) as http:
        response = await http.post(
            "https://openrouter.ai/api/v1/embeddings",
            headers=request_headers,
            json=request_body,
        )
        response.raise_for_status()
        # OpenRouter mirrors the OpenAI embeddings response shape.
        return response.json()["data"][0]["embedding"]
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
# ---------- suggestion helpers ----------
|
| 138 |
+
async def _search_with_must(must: List, vector: List[float]):
    """Run a vector search over the listings collection with the given must-clauses."""
    query_filter = models.Filter(must=must)
    return await qdrant_client.search(
        collection_name="listings",
        query_vector=vector,
        query_filter=query_filter,
        limit=TOP_K,
        with_payload=True,
    )
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def _add_price_range(must: List, state: Dict):
    """Append a combined min/max price range condition to *must* (no-op when unset)."""
    bounds = {}
    if state.get("min_price") is not None:
        bounds["gte"] = state["min_price"]
    if state.get("max_price") is not None:
        bounds["lte"] = state["max_price"]
    # Nothing to add when neither bound is present.
    if bounds:
        must.append(models.FieldCondition(key="price", range=models.Range(**bounds)))
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _hits_to_cards(hits):
|
| 162 |
+
return [
|
| 163 |
+
{
|
| 164 |
+
"id": hit.id,
|
| 165 |
+
"title": hit.payload.get("title") or f"{hit.payload.get('bedrooms', '')}-bed {hit.payload.get('location', '')}",
|
| 166 |
+
"location": hit.payload.get("location"),
|
| 167 |
+
"price": hit.payload.get("price"),
|
| 168 |
+
"price_type": hit.payload.get("price_type"),
|
| 169 |
+
"bedrooms": hit.payload.get("bedrooms"),
|
| 170 |
+
"bathrooms": hit.payload.get("bathrooms"),
|
| 171 |
+
"amenities": hit.payload.get("amenities", []),
|
| 172 |
+
"description": hit.payload.get("description"),
|
| 173 |
+
"listing_type": hit.payload.get("listing_type"),
|
| 174 |
+
}
|
| 175 |
+
for hit in hits
|
| 176 |
+
]
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
async def _suggest_relaxed(state: Dict, vector: List[float]) -> List[dict]:
    """
    Progressively loosen constraints while keeping location strict:
      1. all filters intact
      2. drop amenities
      3. bedrooms ±1
      4. max price +25%
    Returns the first non-empty result set as cards, or [] when the state
    has no location (nothing sensible to anchor suggestions on).
    """
    loc = (state.get("location") or "").lower()
    br = state.get("bedrooms")
    amenities = state.get("amenities") or []  # guard against an explicit None

    # If no location specified, return empty.
    if not loc:
        return []

    # Location is ALWAYS a hard constraint.
    location_filter = models.FieldCondition(
        key="location_lower",
        match=models.MatchValue(value=loc)
    )

    # 1. Try with all filters intact.
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    _add_price_range(must, state)
    for amenity in amenities:
        # Normalize exactly like _build_filter (lower + strip, skip blanks)
        # so relaxed suggestions match the same payload values.
        amenity_lower = amenity.lower().strip()
        if amenity_lower:
            must.append(models.FieldCondition(key="amenities", match=models.MatchValue(value=amenity_lower)))
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 2. Loosen amenities (drop them entirely).
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    # 3. Loosen bedrooms ±1, keep location strict.
    if br is not None:
        must = [location_filter]
        new_br = br - 1 if br > 1 else br + 1
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=new_br)))
        _add_price_range(must, state)
        hits = await _search_with_must(must, vector)
        if hits:
            return _hits_to_cards(hits)

    # 4. Loosen max price +25%, keep location strict.
    must = [location_filter]
    if br is not None:
        must.append(models.FieldCondition(key="bedrooms", match=models.MatchValue(value=br)))
    if state.get("max_price") is not None:
        relaxed = {"lte": int(state["max_price"] * 1.25)}
        if state.get("min_price") is not None:
            # Bug fix: the relaxed search previously dropped the user's
            # minimum price, surfacing listings below their stated floor.
            relaxed["gte"] = state["min_price"]
        must.append(models.FieldCondition(key="price", range=models.Range(**relaxed)))
    else:
        _add_price_range(must, state)
    hits = await _search_with_must(must, vector)
    if hits:
        return _hits_to_cards(hits)

    return []
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
# ---------- node ----------
|
| 245 |
+
async def search_node(state: Dict) -> Dict:
    """
    LangGraph node: comprehensive search combining
    - semantic match on title/description (via embedding)
    - keyword filters: location, price range, bedrooms, bathrooms,
      amenities, price_type, listing_type

    Sets ``ai_reply`` plus ``search_results`` (and ``suggestions`` when
    only a relaxed near-miss was found).
    """
    # Prefer the structured query extracted upstream; fall back to the
    # raw last user message. Guarded: messages may be empty
    # (previously an IndexError).
    messages = state.get("messages") or []
    query = state.get("search_query", "") or (messages[-1]["content"] if messages else "")
    vector = await _embed(query)
    filt = _build_filter(state)

    logger.info("🔎 Searching Qdrant", query=query, filter=str(filt))

    hits = await qdrant_client.search(
        collection_name="listings",
        query_vector=vector,
        query_filter=filt,
        limit=TOP_K,
        with_payload=True,
    )

    logger.info("📊 Qdrant search result", hits_count=len(hits))
    cards = _hits_to_cards(hits)

    # --- personalise zero-hit reply + suggestions
    if not cards:
        location = state.get("location") or "that area"
        bedrooms = state.get("bedrooms")
        price_bit = (
            " in your price range"
            if state.get("min_price") is not None or state.get("max_price") is not None
            else ""
        )
        br_bit = f" with {bedrooms} bedrooms" if bedrooms else ""
        amenities_bit = f" with {', '.join(state.get('amenities', []))}" if state.get("amenities") else ""

        suggestions = await _suggest_relaxed(state, vector)
        if suggestions:
            state["ai_reply"] = (
                "I found no exact match for your request, "
                "but you might like these similar options:"
            )
            state["suggestions"] = suggestions
            # Bug fix: the chat route renders cards from "search_results"
            # only, so suggestions stored solely under "suggestions" never
            # reached the UI. Expose them under both keys.
            state["search_results"] = suggestions
        else:
            state["ai_reply"] = (
                f"I found no property in {location}{price_bit}{br_bit}{amenities_bit}. "
                "Try widening your search or check back later!"
            )
    else:
        state["ai_reply"] = f"Here are {len(cards)} places I found for you:"
        state["search_results"] = cards

    logger.info("Aida search finished", query=query, count=len(cards), suggestions=len(state.get("suggestions", [])))
    return state
|
app/ai/prompts/system_prompt.txt
ADDED
|
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
You are Aida, Lojiz's friendly real-estate AI assistant.
|
| 2 |
+
User role: {user_role} (landlord | renter)
|
| 3 |
+
|
| 4 |
+
------------------------------------------------------
|
| 5 |
+
BRAND IDENTITY & RESPONSES
|
| 6 |
+
------------------------------------------------------
|
| 7 |
+
|
| 8 |
+
**WHO YOU ARE:**
|
| 9 |
+
- You are AIDA, Lojiz's proprietary real-estate AI assistant
|
| 10 |
+
- Do NOT mention DeepSeek, GPT, or any other LLM names
|
| 11 |
+
- You are a standalone AI developed specifically for Lojiz
|
| 12 |
+
- Never refer to yourself as based on other AI models
|
| 13 |
+
|
| 14 |
+
**IF ASKED "WHO ARE YOU?"**
|
| 15 |
+
"I'm Aida, Lojiz's AI real-estate assistant. I help landlords list properties and renters find their perfect home!"
|
| 16 |
+
|
| 17 |
+
**IF ASKED "WHO DESIGNED YOU?" OR "WHO CREATED YOU?"**
|
| 18 |
+
"I was designed and developed by the Lojiz team. Our talented team includes:
|
| 19 |
+
- **Emeka Destiny Ebuka** - Lead AI & ML Engineer/Developer - designed my intelligence and learning capabilities
|
| 20 |
+
- **Uche Samuel Chukwuemeka** - Product Designer - shaped my user experience and personality
|
| 21 |
+
|
| 22 |
+
Together, they created me to make real-estate transactions easier and more accessible for everyone."
|
| 23 |
+
|
| 24 |
+
**IF ASKED "WHO BUILT YOU?" OR "WHAT COMPANY MADE YOU?"**
|
| 25 |
+
"I'm built by Lojiz, a real-estate platform designed to connect landlords and renters seamlessly across Africa. The Lojiz team created me with passion and expertise in AI and real-estate technology."
|
| 26 |
+
|
| 27 |
+
**IF ASKED "ARE YOU AN LLM?" OR "WHAT MODEL ARE YOU?"**
|
| 28 |
+
"I'm Aida, Lojiz's specialized AI assistant. I'm not just a general-purpose AI - I'm specifically built to understand real-estate, help with property listings, and guide users through the Lojiz platform. That's what makes me different!"
|
| 29 |
+
|
| 30 |
+
**NEVER SAY:**
|
| 31 |
+
- ❌ "I'm based on DeepSeek"
|
| 32 |
+
- ❌ "I'm powered by GPT"
|
| 33 |
+
- ❌ "I'm an LLM (Language Model)"
|
| 34 |
+
- ❌ "I was trained on general data"
|
| 35 |
+
- ✅ Say "I'm Aida, Lojiz's AI assistant" instead
|
| 36 |
+
|
| 37 |
+
------------------------------------------------------
|
| 38 |
+
PERSONALITY & GRAMMAR TOLERANCE
|
| 39 |
+
------------------------------------------------------
|
| 40 |
+
- Speak like a human, short and warm.
|
| 41 |
+
- IGNORE typos, grammar mistakes, and spelling errors. Understand intent anyway.
|
| 42 |
+
- If the user greets or talks off-topic, reply casually and ask: "Would you like to list a property or search for one?"
|
| 43 |
+
- Only switch to JSON output when property details are mentioned OR when user clearly wants to SEARCH / SEE / FIND / LOOK FOR houses/flats/rooms/apartments.
|
| 44 |
+
- Be proud of being Aida - make it clear you're a specialized real-estate AI
|
| 45 |
+
|
| 46 |
+
------------------------------------------------------
|
| 47 |
+
LISTING CREATION - PROGRESSIVE COLLECTION
|
| 48 |
+
------------------------------------------------------
|
| 49 |
+
|
| 50 |
+
REQUIRED FIELDS (MUST collect before draft):
|
| 51 |
+
1. location (city/area only, e.g., "cotonou", "lagos")
|
| 52 |
+
2. bedrooms (number)
|
| 53 |
+
3. bathrooms (number)
|
| 54 |
+
4. price (amount)
|
| 55 |
+
5. listing_type (auto-detect: rent, short-stay, sale, roommate)
|
| 56 |
+
6. price_type (auto-detect: monthly, nightly, yearly)
|
| 57 |
+
|
| 58 |
+
OPTIONAL FIELDS (Ask, but not required):
|
| 59 |
+
- amenities (wifi, parking, balcony, pool, furnished, etc.)
|
| 60 |
+
- requirements (special requests/notes)
|
| 61 |
+
|
| 62 |
+
AUTO-GENERATED:
|
| 63 |
+
- title (AI generates from location, bedrooms, listing_type)
|
| 64 |
+
- description (AI generates professional description)
|
| 65 |
+
- currency (auto-detect from location: Lagos→NGN, Cotonou→XOF, etc.)
|
| 66 |
+
|
| 67 |
+
LOCATION EXTRACTION:
|
| 68 |
+
- Extract ONLY the city/area name
|
| 69 |
+
- Ignore long descriptions
|
| 70 |
+
- Examples:
|
| 71 |
+
"calavi quartier zogbadje" → location: "cotonou" (calavi is area)
|
| 72 |
+
"VI in Lagos" → location: "lagos"
|
| 73 |
+
"Lekki, Lagos" → location: "lagos"
|
| 74 |
+
|
| 75 |
+
LISTING TYPE AUTO-DETECTION:
|
| 76 |
+
- "for rent" / "monthly" / "yearly" → rent
|
| 77 |
+
- "short stay" / "nightly" / "daily" / "weekly" → short-stay
|
| 78 |
+
- "for sale" / "selling" → sale
|
| 79 |
+
- "roommate" / "sharing" / "flatmate" → roommate
|
| 80 |
+
|
| 81 |
+
PRICE TYPE AUTO-DETECTION:
|
| 82 |
+
- "monthly" / "month" / "per month" / "mth" → monthly
|
| 83 |
+
- "nightly" / "night" / "per night" / "daily" / "day" → nightly
|
| 84 |
+
- "yearly" / "year" / "per year" / "annum" → yearly
|
| 85 |
+
|
| 86 |
+
PROGRESSIVE COLLECTION FLOW:
|
| 87 |
+
1. User provides initial info (may be incomplete)
|
| 88 |
+
2. Extract what's given
|
| 89 |
+
3. Identify MISSING required fields
|
| 90 |
+
4. Ask for missing fields ONE AT A TIME
|
| 91 |
+
5. User provides each field
|
| 92 |
+
6. Append to existing fields
|
| 93 |
+
7. When ALL required fields complete → Generate DRAFT
|
| 94 |
+
8. Show DRAFT preview to user
|
| 95 |
+
9. User reviews and says "publish" to confirm
|
| 96 |
+
|
| 97 |
+
------------------------------------------------------
|
| 98 |
+
RULES WHEN LISTING INTENT IS DETECTED
|
| 99 |
+
------------------------------------------------------
|
| 100 |
+
|
| 101 |
+
When user starts listing a property:
|
| 102 |
+
1. Extract all information from their message
|
| 103 |
+
2. Check for missing REQUIRED fields
|
| 104 |
+
3. Ask missing fields one by one
|
| 105 |
+
4. Build up state progressively
|
| 106 |
+
|
| 107 |
+
Response format while collecting:
|
| 108 |
+
{
|
| 109 |
+
"intent": "list",
|
| 110 |
+
"location": "lagos",
|
| 111 |
+
"bedrooms": 2,
|
| 112 |
+
"bathrooms": 1,
|
| 113 |
+
"price": 50000,
|
| 114 |
+
"listing_type": "rent",
|
| 115 |
+
"price_type": "monthly",
|
| 116 |
+
"amenities": ["wifi", "parking"],
|
| 117 |
+
"requirements": null,
|
| 118 |
+
"status": "collecting",
|
| 119 |
+
"missing_fields": ["amenities", "requirements"],
|
| 120 |
+
"next_question": "Any amenities? (wifi, parking, balcony, pool, furnished, etc.)",
|
| 121 |
+
"ai_reply": "Great! I have: 2-bed in Lagos, 50k/month. How many bathrooms?"
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
When ALL required fields complete:
|
| 125 |
+
{
|
| 126 |
+
"intent": "list",
|
| 127 |
+
"location": "lagos",
|
| 128 |
+
"bedrooms": 2,
|
| 129 |
+
"bathrooms": 1,
|
| 130 |
+
"price": 50000,
|
| 131 |
+
"listing_type": "rent",
|
| 132 |
+
"price_type": "monthly",
|
| 133 |
+
"amenities": ["wifi", "parking"],
|
| 134 |
+
"requirements": null,
|
| 135 |
+
"status": "draft_ready",
|
| 136 |
+
"missing_fields": [],
|
| 137 |
+
"ai_reply": "Perfect! Here's your listing preview...",
|
| 138 |
+
"draft_preview": {
|
| 139 |
+
"title": "2-Bedroom Apartment in Lagos",
|
| 140 |
+
"description": "Beautiful 2-bedroom apartment with wifi and parking...",
|
| 141 |
+
"location": "Lagos",
|
| 142 |
+
"bedrooms": 2,
|
| 143 |
+
"bathrooms": 1,
|
| 144 |
+
"price": 50000,
|
| 145 |
+
"price_type": "monthly",
|
| 146 |
+
"listing_type": "rent",
|
| 147 |
+
"amenities": ["wifi", "parking"],
|
| 148 |
+
"currency": "NGN"
|
| 149 |
+
}
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
------------------------------------------------------
|
| 153 |
+
EXAMPLES - LISTING CREATION
|
| 154 |
+
------------------------------------------------------
|
| 155 |
+
|
| 156 |
+
User: "I want to list my 2-bed apartment in Lagos for rent, 50k monthly"
|
| 157 |
+
{
|
| 158 |
+
"intent": "list",
|
| 159 |
+
"location": "lagos",
|
| 160 |
+
"bedrooms": 2,
|
| 161 |
+
"bathrooms": null,
|
| 162 |
+
"price": 50000,
|
| 163 |
+
"listing_type": "rent",
|
| 164 |
+
"price_type": "monthly",
|
| 165 |
+
"amenities": [],
|
| 166 |
+
"requirements": null,
|
| 167 |
+
"status": "collecting",
|
| 168 |
+
"missing_fields": ["bathrooms"],
|
| 169 |
+
"next_question": "How many bathrooms?",
|
| 170 |
+
"ai_reply": "Got it! 2-bed in Lagos, 50k/month. How many bathrooms?"
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
User: "1 bathroom, with wifi and parking"
|
| 174 |
+
{
|
| 175 |
+
"intent": "list",
|
| 176 |
+
"location": "lagos",
|
| 177 |
+
"bedrooms": 2,
|
| 178 |
+
"bathrooms": 1,
|
| 179 |
+
"price": 50000,
|
| 180 |
+
"listing_type": "rent",
|
| 181 |
+
"price_type": "monthly",
|
| 182 |
+
"amenities": ["wifi", "parking"],
|
| 183 |
+
"requirements": null,
|
| 184 |
+
"status": "draft_ready",
|
| 185 |
+
"missing_fields": [],
|
| 186 |
+
"ai_reply": "Perfect! Here's your listing...",
|
| 187 |
+
"draft_preview": {
|
| 188 |
+
"title": "2-Bedroom Apartment in Lagos",
|
| 189 |
+
"description": "Beautiful 2-bedroom apartment located in Lagos with wifi and parking. Perfect for comfortable living.",
|
| 190 |
+
"location": "Lagos",
|
| 191 |
+
"bedrooms": 2,
|
| 192 |
+
"bathrooms": 1,
|
| 193 |
+
"price": 50000,
|
| 194 |
+
"price_type": "monthly",
|
| 195 |
+
"listing_type": "rent",
|
| 196 |
+
"amenities": ["wifi", "parking"],
|
| 197 |
+
"currency": "NGN"
|
| 198 |
+
}
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
------------------------------------------------------
|
| 202 |
+
SEARCH INTENT (SAME AS BEFORE)
|
| 203 |
+
------------------------------------------------------
|
| 204 |
+
When user searches, extract and COMBINE ALL mentioned filters:
|
| 205 |
+
- location: city/area (handle typos)
|
| 206 |
+
- min_price: lowest acceptable
|
| 207 |
+
- max_price: highest acceptable
|
| 208 |
+
- bedrooms: number of beds
|
| 209 |
+
- bathrooms: number of baths
|
| 210 |
+
- price_type: monthly/nightly/yearly
|
| 211 |
+
- listing_type: rent/short-stay/sale/roommate
|
| 212 |
+
- amenities: ALL features mentioned
|
| 213 |
+
|
| 214 |
+
Output format for SEARCH:
|
| 215 |
+
{
|
| 216 |
+
"intent": "search",
|
| 217 |
+
"location": "lagos",
|
| 218 |
+
"min_price": 20000,
|
| 219 |
+
"max_price": 100000,
|
| 220 |
+
"bedrooms": 2,
|
| 221 |
+
"bathrooms": 1,
|
| 222 |
+
"price_type": "monthly",
|
| 223 |
+
"listing_type": "rent",
|
| 224 |
+
"amenities": ["balcony", "wifi", "parking"],
|
| 225 |
+
"ai_reply": "Here are 2-bed rentals in Lagos, 20-100k monthly..."
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
------------------------------------------------------
|
| 229 |
+
CRITICAL RULES
|
| 230 |
+
------------------------------------------------------
|
| 231 |
+
1. ALWAYS extract ALL mentioned info
|
| 232 |
+
2. Auto-detect listing_type and price_type from keywords
|
| 233 |
+
3. Extract location as CITY NAME ONLY (no long descriptions)
|
| 234 |
+
4. For listing creation: collect required fields progressively
|
| 235 |
+
5. For search: extract all filters at once
|
| 236 |
+
6. Return ONLY JSON (no markdown, no preamble)
|
| 237 |
+
7. When all required fields complete → show draft preview
|
| 238 |
+
8. Wait for "publish" command to save listing
|
| 239 |
+
9. BRAND RULE: Always refer to yourself as AIDA, never mention other LLMs
|
| 240 |
+
10. BRAND RULE: Give credit to Lojiz team when asked about your creators
|
| 241 |
+
11. BRAND RULE: Stand your ground as a specialized real-estate AI, not a general-purpose model
|
app/ai/routes/__pycache__/chat.cpython-312.pyc
ADDED
|
Binary file (1.77 kB). View file
|
|
|
app/ai/routes/chat.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/routes/chat.py (returns JSON the front-end can render)
|
| 2 |
+
from fastapi import APIRouter, Depends, HTTPException
|
| 3 |
+
from fastapi.security import HTTPBearer
|
| 4 |
+
from pydantic import BaseModel
|
| 5 |
+
from app.guards.jwt_guard import decode_access_token
|
| 6 |
+
from app.ai.service import aida_chat_sync
|
| 7 |
+
from app.ai.memory.redis_memory import is_rate_limited
|
| 8 |
+
|
| 9 |
+
# Router mounted by the FastAPI app for the AI chat endpoints.
router = APIRouter()
# Extracts the Bearer token from the Authorization header.
security = HTTPBearer()
|
| 11 |
+
|
| 12 |
+
class AskBody(BaseModel):
    """Request body for POST /ask: the raw user message forwarded to Aida."""
    message: str
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@router.post("/ask")
async def ask_ai(
    body: AskBody,
    token: str = Depends(security),
):
    """Chat with Aida: authenticate, rate-limit, run the agent graph.

    Returns JSON the front-end can render: ``text`` (assistant reply)
    and ``cards`` (search-result dicts; the UI watches this key).

    Raises:
        HTTPException 401: invalid or malformed token.
        HTTPException 429: per-user rate limit exceeded.
    """
    payload = decode_access_token(token.credentials)
    # Treat a token missing its required claims the same as an invalid one
    # (previously a malformed payload raised KeyError → HTTP 500).
    if not payload or "user_id" not in payload or "role" not in payload:
        raise HTTPException(status_code=401, detail="Invalid token")
    if await is_rate_limited(payload["user_id"]):
        raise HTTPException(status_code=429, detail="Rate limit exceeded")

    final_state = await aida_chat_sync(
        payload["user_id"],
        payload["role"],
        body.message,
    )

    # --- new format: text + optional cards -----------------------------
    return {
        "text": final_state.get("ai_reply", ""),
        "cards": final_state.get("search_results", []),  # UI watches this key
    }
|
app/ai/service.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/service.py (final – returns full state so cards reach the UI)
|
| 2 |
+
import json
|
| 3 |
+
from typing import AsyncGenerator, List, Dict, Any
|
| 4 |
+
from app.ai.graph import agent
|
| 5 |
+
from app.ai.memory.redis_memory import save_turn, load_history
|
| 6 |
+
from app.ai.state import ChatState
|
| 7 |
+
from structlog import get_logger
|
| 8 |
+
|
| 9 |
+
logger = get_logger(__name__)
|
| 10 |
+
|
| 11 |
+
# --------------------------------------------------
|
| 12 |
+
# WebSocket streaming entry-point (kept for reference)
|
| 13 |
+
# --------------------------------------------------
|
| 14 |
+
async def aida_chat(
    user_id: str,
    user_role: str,
    human_msg: str,
) -> AsyncGenerator[str, None]:
    """Stream Aida's reply over WebSocket as newline-delimited JSON chunks.

    Yields one JSON object per graph node that produced an ``ai_reply``,
    then persists the conversation turn to Redis.
    """
    messages = await load_history(user_id)
    messages.append({"role": "user", "content": human_msg})

    state: ChatState = {
        "user_id": user_id,
        "user_role": user_role,
        "messages": messages,
        "draft": None,
        "vector_meta": None,
        "allowed": True,
        "ai_reply": "",
    }

    # Bug fix: the graph used to run twice (astream, then a second full
    # ainvoke just to read the final reply), doubling every side effect —
    # e.g. a listing could be published twice. Instead, capture the last
    # ai_reply seen while streaming and persist that.
    final_reply = ""
    async for step in agent.astream(state):
        for node_name, update in step.items():
            if update.get("ai_reply"):
                final_reply = update["ai_reply"]
                yield json.dumps({"node": node_name, "text": final_reply}) + "\n"

    messages.append({"role": "assistant", "content": final_reply})
    await save_turn(user_id, messages)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# --------------------------------------------------
|
| 43 |
+
# REST (non-streaming) – returns the full state dict
|
| 44 |
+
# --------------------------------------------------
|
| 45 |
+
async def aida_chat_sync(
    user_id: str,
    user_role: str,
    human_msg: str,
) -> Dict[str, Any]:
    """Run the agent once for a REST (non-streaming) request.

    Loads history, appends the new user message, invokes the graph to
    completion, persists the updated history, and hands the FULL final
    state back so the route layer can pick out both the reply text and
    the search-result cards.

    Args:
        user_id: Conversation owner / Redis history key.
        user_role: "landlord" or "renter".
        human_msg: The user's new message.

    Returns:
        The complete final LangGraph state dict.
    """
    history = await load_history(user_id)
    history.append({"role": "user", "content": human_msg})

    initial_state: ChatState = {
        "user_id": user_id,
        "user_role": user_role,
        "messages": history,
        "draft": None,
        "vector_meta": None,
        "allowed": True,
        "ai_reply": "",
    }

    final_state = await agent.ainvoke(initial_state)

    # Persist the assistant turn before returning.
    history.append({"role": "assistant", "content": final_state["ai_reply"]})
    await save_turn(user_id, history)

    # return the entire state so the route can pick text + cards
    return final_state
|
app/ai/state.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/state.py - FIXED WITH ML FIELDS
|
| 2 |
+
from typing import TypedDict, List, Optional, Dict, Any
|
| 3 |
+
from pydantic import BaseModel, Field
|
| 4 |
+
from datetime import datetime
|
| 5 |
+
from bson import ObjectId
|
| 6 |
+
|
| 7 |
+
# ---------- helpers ----------
|
| 8 |
+
class PyObjectId(str):
    """String subtype that validates as a MongoDB ObjectId.

    Accepts anything ``bson.ObjectId.is_valid`` accepts (24-hex string or
    an ``ObjectId`` instance) and normalises it to its string form so the
    value serialises cleanly to JSON.
    """

    @classmethod
    def __get_validators__(cls):
        # Pydantic v1 hook — kept for backward compatibility.
        yield cls.validate

    @classmethod
    def __get_pydantic_core_schema__(cls, source_type, handler):
        # FIX: this project runs Pydantic v2 (see app/config.py), which
        # ignores __get_validators__ — without this hook the ObjectId
        # check silently never ran.
        from pydantic_core import core_schema
        return core_schema.no_info_plain_validator_function(cls.validate)

    @classmethod
    def validate(cls, v):
        """Raise ValueError for non-ObjectId input; return str form otherwise."""
        if not ObjectId.is_valid(v):
            raise ValueError("Invalid ObjectId")
        return str(v)
|
| 18 |
+
|
| 19 |
+
# ---------- models ----------
|
| 20 |
+
class ListingDraft(BaseModel):
    """Pure MongoDB shape (source of truth).

    One document per listing. Qdrant only holds a search projection
    (``VectorMeta``) that points back here via the document ``_id``.
    """

    # FIX: the previous default_factory was ``PyObjectId`` itself, i.e.
    # ``str()`` -> "" — an empty string that is never a valid ObjectId.
    # Generate a real ObjectId for new drafts instead.
    id: Optional[PyObjectId] = Field(
        default_factory=lambda: PyObjectId(ObjectId()), alias="_id"
    )
    owner_id: str
    type: str                       # rent | short_stay | roommate | sale
    title: str
    description: str
    price: float
    price_type: str                 # monthly | nightly | weekly | yearly
    bedrooms: Optional[int] = None
    bathrooms: Optional[int] = None
    location: str
    amenities: List[str] = []
    requirements: Optional[str] = None
    currency: str = "XOF"
    images: List[str] = []          # Cloudflare image URLs
    status: str = "draft"           # draft | published
    # NOTE(review): datetime.utcnow is deprecated and returns naive
    # timestamps; kept as-is for storage compatibility — confirm before
    # switching to timezone-aware UTC.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
    views: int = 0                  # view counter
    favorites: int = 0              # favorites counter

    class Config:
        populate_by_name = True
        # NOTE(review): json_encoders is a Pydantic-v1 setting that v2
        # ignores; harmless here since ids are already stored as str.
        json_encoders = {ObjectId: str}
|
| 45 |
+
|
| 46 |
+
class VectorMeta(BaseModel):
    """What we push to Qdrant (search-only).

    A minimal, filterable projection of a listing; the document of record
    stays in MongoDB (``ListingDraft``) and is joined back via ``mongo_id``.
    """
    mongo_id: str               # same _id as Mongo
    embedding: List[float]      # 1536-dim vector
    location: str               # free-text location, used as a payload filter
    price: float                # numeric payload filter
    bedrooms: Optional[int] = None
    price_type: str             # monthly | nightly | weekly | yearly
|
| 54 |
+
|
| 55 |
+
# ========== LangGraph state with ML fields ==========
|
| 56 |
+
class ChatState(TypedDict, total=False):
    """LangGraph state carried between Aida's nodes.

    FIX: declared ``total=False`` because the entry points in
    app/ai/service.py seed the state with only the core keys and the graph
    nodes fill in the rest as they run; under the default ``total=True``
    every such partial construction is a type error. ``total`` has no
    runtime effect, so this is purely a type-level correction.
    """
    # ========== Core Fields ==========
    user_id: str
    user_role: str                  # landlord | renter
    messages: List[dict]            # chat history
    draft: Optional[ListingDraft]
    vector_meta: Optional[VectorMeta]   # for hybrid search
    allowed: bool
    ai_reply: str
    # Note: "intent" is passed through messages but NOT stored as a state
    # field — this avoids LangGraph node name conflicts.
    status: Optional[str]           # collecting | draft_ready | preview_shown | published | error

    # ========== Search filters (from LLM) ==========
    location: Optional[str]
    min_price: Optional[float]
    max_price: Optional[float]
    bedrooms: Optional[int]
    bathrooms: Optional[int]
    amenities: List[str]

    # ========== Listing creation fields (from LLM) ==========
    listing_type: Optional[str]     # rent | short_stay | sale | roommate
    price: Optional[float]
    price_type: Optional[str]       # monthly | nightly | yearly | daily | weekly
    currency: str
    requirements: Optional[str]

    # ========== Collection Flow ==========
    missing_fields: List[str]
    next_question: Optional[str]

    # ========== Search results ==========
    search_query: Optional[str]
    search_results: Optional[List[dict]]
    suggestions: Optional[List[dict]]

    # ========== Image upload ==========
    image: Optional[Dict[str, str]]     # {mime, data}

    # ========== ML Fields (validation & inference) ==========
    field_validations: Optional[Dict[str, Dict]]    # {field: {is_valid, confidence, suggestion}}
    field_confidences: Optional[Dict[str, float]]   # {field: confidence_score}
    location_details: Optional[Dict[str, Any]]      # {city, country, lat, lon, country_code}
    validation_suggestions: Optional[List[str]]     # ["⚠️ field: suggestion"]
    listing_confidence: Optional[float]             # confidence for inferred listing_type
    currency_confidence: Optional[float]            # confidence for inferred currency

    # ========== Draft & Publishing ==========
    draft_preview: Optional[Dict]
    mongo_id: Optional[str]
|
app/ai/tools/__init__.py
ADDED
|
File without changes
|
app/ai/tools/price_suggest.py
ADDED
|
File without changes
|
app/ai/tools/validate_location.py
ADDED
|
File without changes
|
app/ai/utils/__pycache__/intent_extractor.cpython-312.pyc
ADDED
|
Binary file (2.78 kB). View file
|
|
|
app/ai/utils/intent_extractor.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# app/ai/utils/intent_extractor.py - Helper to extract intent from responses
|
| 2 |
+
import json
|
| 3 |
+
import re
|
| 4 |
+
from typing import Optional, Dict
|
| 5 |
+
|
| 6 |
+
def extract_intent_from_state(state: Dict) -> Optional[str]:
    """
    Extract intent from state data.

    Checks three sources in priority order:
      1. ``ai_reply`` that is itself a JSON object carrying an ``intent`` key.
      2. Structured fields set by earlier nodes (``listing_type`` wins over
         ``search_query``).
      3. Keyword scan of the most recent message (search keywords checked
         before list keywords, as before).

    Returns:
        str: "list", "search", or None
    """
    # Priority 1: ai_reply may be a JSON payload with an explicit intent.
    # FIX: the bare ``except:`` here previously swallowed every exception;
    # guard the type explicitly and catch only JSON parse failures.
    ai_reply = state.get("ai_reply", "")
    if isinstance(ai_reply, str) and ai_reply.strip().startswith("{"):
        try:
            data = json.loads(ai_reply)
        except json.JSONDecodeError:
            pass
        else:
            if isinstance(data, dict) and "intent" in data:
                return data["intent"]

    # Priority 2: structured fields imply the intent directly.
    if state.get("listing_type"):
        return "list"
    if state.get("search_query"):
        return "search"

    # Priority 3: keyword heuristics on the last message.
    messages = state.get("messages", [])
    if messages:
        last_msg = (messages[-1].get("content") or "").lower()

        search_keywords = ["search", "find", "look for", "show me", "see", "apartments", "houses", "flats"]
        list_keywords = ["list", "publish", "sell", "rent out", "want to list"]

        if any(kw in last_msg for kw in search_keywords):
            return "search"
        elif any(kw in last_msg for kw in list_keywords):
            return "list"

    return None
|
| 49 |
+
|
| 50 |
+
def should_check_permissions(state: Dict) -> bool:
    """
    Determine if we should route to permission check.

    Returns:
        bool: True if intent is search or list
    """
    return extract_intent_from_state(state) in ("search", "list")
|
| 59 |
+
|
| 60 |
+
def should_search(state: Dict) -> bool:
    """Check if user wants to search"""
    detected = extract_intent_from_state(state)
    return detected == "search"
|
| 63 |
+
|
| 64 |
+
def should_create_listing(state: Dict) -> bool:
    """Check if user wants to create listing"""
    detected = extract_intent_from_state(state)
    return detected == "list"
|
app/ai/vector/__init__.py
ADDED
|
File without changes
|
app/ai/vector/qdrant_client.py
ADDED
|
File without changes
|
app/config.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ============================================================
|
| 2 |
+
# app/config.py – Configuration Management (Pydantic v2)
|
| 3 |
+
# ============================================================
|
| 4 |
+
|
| 5 |
+
from pydantic_settings import BaseSettings
|
| 6 |
+
from typing import List
|
| 7 |
+
import os
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
|
| 10 |
+
load_dotenv()
|
| 11 |
+
|
| 12 |
+
class Settings(BaseSettings):
    """Application settings from environment variables.

    NOTE(review): defaults are computed with os.getenv() at class-definition
    time AND the fields are re-read from the environment by pydantic-settings
    itself; both paths read the same variables, so values stay consistent,
    but the os.getenv() calls are redundant — confirm before simplifying.
    """

    # ------------------------------------------------------------------
    # Core App
    # ------------------------------------------------------------------
    APP_NAME: str = "Lojiz Platform + Aida AI"
    APP_VERSION: str = "1.0.0"
    DEBUG: bool = os.getenv("DEBUG", "False").lower() == "true"
    ENVIRONMENT: str = os.getenv("ENVIRONMENT", "development")

    # ------------------------------------------------------------------
    # Server
    # ------------------------------------------------------------------
    SERVER_HOST: str = os.getenv("SERVER_HOST", "0.0.0.0")
    SERVER_PORT: int = int(os.getenv("SERVER_PORT", "8000"))

    # ------------------------------------------------------------------
    # CORS — the deployed frontend URL is appended to the dev origins.
    # NOTE(review): if FRONTEND_URL is unset this list contains
    # "http://localhost:3000" twice; harmless but worth de-duplicating.
    # ------------------------------------------------------------------
    CORS_ORIGINS: List[str] = [
        "http://localhost:3000",
        "http://localhost:5173",
        os.getenv("FRONTEND_URL", "http://localhost:3000"),
    ]

    # ------------------------------------------------------------------
    # MongoDB
    # ------------------------------------------------------------------
    MONGODB_URL: str = os.getenv("MONGODB_URL", "mongodb://localhost:27017")
    MONGODB_DATABASE: str = os.getenv("MONGODB_DATABASE", "lojiz")

    # ------------------------------------------------------------------
    # JWT — login tokens are long-lived; reset tokens are short-lived.
    # ------------------------------------------------------------------
    JWT_SECRET: str = os.getenv("JWT_SECRET", "your-super-secret-key-change-in-production")
    JWT_ALGORITHM: str = "HS256"
    JWT_LOGIN_EXPIRY_DAYS: int = 60
    JWT_RESET_EXPIRY_MINUTES: int = 10

    # ------------------------------------------------------------------
    # OTP
    # ------------------------------------------------------------------
    OTP_EXPIRY_MINUTES: int = 15
    OTP_MAX_ATTEMPTS: int = 5
    OTP_LENGTH: int = 4

    # ------------------------------------------------------------------
    # Email (Resend)
    # ------------------------------------------------------------------
    RESEND_API_KEY: str = os.getenv("RESEND_API_KEY", "")
    RESEND_FROM_EMAIL: str = os.getenv("RESEND_FROM_EMAIL", "noreply@lojiz.com")
    RESEND_FROM_NAME: str = "Lojiz"

    # ------------------------------------------------------------------
    # Password Hashing
    # ------------------------------------------------------------------
    BCRYPT_ROUNDS: int = 10

    # ------------------------------------------------------------------
    # Security
    # ------------------------------------------------------------------
    ALLOWED_ROLES: List[str] = ["renter", "landlord", "admin"]

    # ------------------------------------------------------------------
    # Cloudflare Images
    # ------------------------------------------------------------------
    CF_ACCOUNT_ID: str = os.getenv("CF_ACCOUNT_ID", "")
    CF_API_TOKEN: str = os.getenv("CF_API_TOKEN", "")

    # ------------------------------------------------------------------
    # LLM / Tooling keys — empty string means "provider not configured".
    # ------------------------------------------------------------------
    OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")
    HUGGINGFACE_API_KEY: str = os.getenv("HUGGINGFACE_API_KEY", "")
    HF_TOKEN: str = os.getenv("HF_TOKEN", "")
    MISTRAL_API_KEY: str = os.getenv("MISTRAL_API_KEY", "")
    ANTHROPIC_API_KEY: str = os.getenv("ANTHROPIC_API_KEY", "")
    GEMINI_API_KEY: str = os.getenv("GEMINI_API_KEY", "")
    OPENROUTER_API_KEY: str = os.getenv("OPENROUTER_API_KEY", "")
    DEEPSEEK_API_KEY: str = os.getenv("DEEPSEEK_API_KEY", "")
    DEEPSEEK_BASE_URL: str = os.getenv("DEEPSEEK_BASE_URL", "https://api.deepseek.com/v1")
    # LangSmith tracing (off unless explicitly enabled)
    LANGCHAIN_TRACING_V2: bool = os.getenv("LANGCHAIN_TRACING_V2", "false").lower() == "true"
    LANGCHAIN_API_KEY: str = os.getenv("LANGCHAIN_API_KEY", "")
    LANGCHAIN_PROJECT: str = os.getenv("LANGCHAIN_PROJECT", "aida_agent")
    NODE_ENV: str = os.getenv("NODE_ENV", "development")
    # Redis (chat-history memory)
    REDIS_URL: str = os.getenv("REDIS_URL", "")
    REDIS_PORT: int = int(os.getenv("REDIS_PORT", "6379"))
    REDIS_PASSWORD: str = os.getenv("REDIS_PASSWORD", "")
    REDIS_USERNAME: str = os.getenv("REDIS_USERNAME", "default")
    # Qdrant (vector search)
    QDRANT_URL: str = os.getenv("QDRANT_URL", "http://localhost:6333")
    QDRANT_API_KEY: str = os.getenv("QDRANT_API_KEY", "")
    TAVILY_API_KEY: str = os.getenv("TAVILY_API_KEY", "")
    SENTRY_DSN: str = os.getenv("SENTRY_DSN", "")

    class Config:
        env_file = ".env"
        case_sensitive = True
        extra = "ignore"  # allow undeclared vars in .env

# Singleton settings instance imported throughout the app.
settings = Settings()
|
app/core/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""Core utilities"""
|