# words2csv/openai_backend.py
import os
import base64
import time
from io import BytesIO
from typing import Optional
from PIL import Image
from logging_helper import log as _log, log_debug as _log_debug, _log_model_response
from image_utils import _pil_image_to_base64_jpeg
from common import MODELS_MAP
# The OpenAI SDK is an optional dependency; degrade gracefully when it is absent.
try:
    from openai import OpenAI
except ImportError:  # pragma: no cover
    OpenAI = None  # type: ignore


def _run_openai_vision(image: Image.Image, prompt: str, model_name: str) -> str:
    """Send a single image plus prompt to an OpenAI vision model and return the text reply."""
    if OpenAI is None:
        raise RuntimeError("openai package is not installed. Please install it to use the ChatGPT 5.2 backend.")
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("OPENAI_API_KEY environment variable is not set.")
    client = OpenAI(api_key=api_key)
    # Encode the PIL image as a base64 JPEG so it can be embedded as a data URL.
    img_b64 = _pil_image_to_base64_jpeg(image)
    _log_debug(f"Using OpenAI model: {model_name}")
    _log_debug(f"Input image size: {image.size}")
    start_time = time.perf_counter()
    response = client.chat.completions.create(
        model=model_name,
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{img_b64}"},
                    },
                ],
            }
        ],
        max_completion_tokens=4048,
    )
    duration = time.perf_counter() - start_time
    content = response.choices[0].message.content or ""
    _log_model_response(
        model_name=model_name,
        content=content,
        duration=duration,
        usage=response.usage,
        pricing=MODELS_MAP,
    )
    return content
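

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the backend itself. It assumes
# OPENAI_API_KEY is set, a local image exists at the hypothetical path
# "sample.jpg", and that the chosen model name ("gpt-4o" here) is one of the
# keys configured in MODELS_MAP; adjust both to match your setup.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample = Image.open("sample.jpg")
    reply = _run_openai_vision(
        image=sample,
        prompt="List every word visible in this image as CSV.",
        model_name="gpt-4o",
    )
    print(reply)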