import gradio as gr
import torch
import numpy as np
from chronos import Chronos2Pipeline

# --- MODEL SETTINGS ---
MODEL_NAME = "amazon/chronos-2"

print(f"🚀 Loading model {MODEL_NAME}...")
try:
    pipeline = Chronos2Pipeline.from_pretrained(
        MODEL_NAME,
        device_map="cpu",
        torch_dtype=torch.float32,
    )
    print("✅ Model loaded successfully!")
except Exception as e:
    print(f"❌ Model loading error: {e}")
    pipeline = None


def predict(context_str, prediction_length):
    if pipeline is None:
        return "Error: Model could not be loaded."
    try:
        # 1. Clean the input
        clean_s = context_str.strip()
        if not clean_s:
            return "Error: Input is empty."
        data_list = [float(x) for x in clean_s.split(',')]

        # 2. Build the context tensor with shape (batch, dim, time) -> (1, 1, time)
        # 🔥 FIX APPLIED HERE (double unsqueeze adds the batch and variate dims) 🔥
        context_tensor = torch.tensor(data_list).unsqueeze(0).unsqueeze(0)

        # 3. Run the forecast
        prediction_length = int(prediction_length)
        forecast = pipeline.predict(context_tensor, prediction_length)

        # 4. Extract summary statistics (quantiles taken over the full forecast horizon)
        median_price = forecast[0].quantile(0.5).item()
        low_bound = forecast[0].quantile(0.1).item()
        high_bound = forecast[0].quantile(0.9).item()

        # 5. Format the response as "median|low|high"
        return f"{median_price}|{low_bound}|{high_bound}"
    except Exception as e:
        return f"Error: {str(e)}"


# Setting ssr_mode=False to prevent 404 errors
iface = gr.Interface(fn=predict, inputs=["text", "number"], outputs="text")
iface.launch(server_name="0.0.0.0", server_port=7860, ssr_mode=False)
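
# As a quick local sanity check, the predict() helper can be called directly with a
# comma-separated series and a horizon before deploying (the values below are
# hypothetical sample data, not from the original script; note that iface.launch()
# above blocks, so run such a check before the launch call or in a separate session):
#
#   print(predict("101.2, 102.5, 101.8, 103.1, 104.0", 5))
#   # expected output format: "<median>|<p10>|<p90>"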