# example_submission.py — WM Bench example submission (World-Model repository)
# Uploaded by SeaWolf-AI (commit 2ff0108, verified)
"""
WM Bench β€” Example Submission Script
=====================================
Any world model can participate in WM Bench using this template.
No 3D environment needed β€” text input/output only.
Usage:
python example_submission.py --api_url YOUR_MODEL_API --api_key YOUR_KEY --model YOUR_MODEL_NAME
"""
import json
import argparse
import requests
import time
from pathlib import Path
# ── Settings ────────────────────────────────────────────────────
DATASET_PATH = Path(__file__).parent.parent / "data" / "wm_bench_dataset.json"
SYSTEM_PROMPT = """You are a world model. Given scene_context as JSON, respond in exactly 2 lines:
Line 1: PREDICT: left=<safe|danger>(<reason>), right=<safe|danger>(<reason>), fwd=<safe|danger>(<reason>), back=<safe|danger>(<reason>)
Line 2: MOTION: <describe the character's physical motion and emotional state in one sentence>
Respond ONLY these 2 lines. No explanation."""
# ── Main evaluation function ────────────────────────────────────
def run_evaluation(api_url: str, api_key: str, model: str, output_path: str = "my_submission.json"):
    """Run the WM Bench evaluation and write a submission file.

    Parameters:
        api_url: OpenAI-compatible API URL (e.g. https://api.openai.com/v1/chat/completions)
        api_key: API key
        model: model name
        output_path: path of the submission JSON file to write

    Returns:
        The path of the written submission file (same as ``output_path``).
    """
    # Load the benchmark dataset from the repository's data directory.
    with open(DATASET_PATH, "r", encoding="utf-8") as f:
        dataset = json.load(f)
    scenarios = dataset["scenarios"]
    print(f"βœ… 데이터셋 λ‘œλ“œ: {len(scenarios)}개 μ‹œλ‚˜λ¦¬μ˜€")
    print(f"πŸ€– λͺ¨λΈ: {model}")
    print(f"πŸ”— API: {api_url}\n")
    results = []
    errors = 0
    for i, scenario in enumerate(scenarios):
        sc_id = scenario["id"]
        cat = scenario["cat"]
        scene = scenario["scene_context"]
        # Query the model; call_api measures its own latency, so no timer here
        # (the original had an unused `t0 = time.time()` at this point).
        response_text, latency_ms = call_api(api_url, api_key, model, scene)
        if response_text is None:
            errors += 1
            print(f" ❌ {sc_id} ({cat}): API 였λ₯˜")
        # Single record for both outcomes; "response" is None and "error" is
        # True exactly when the call failed. Latency is now rounded
        # consistently (the original error branch stored it unrounded).
        results.append({
            "id": sc_id,
            "cat": cat,
            "response": response_text,
            "latency_ms": round(latency_ms, 1),
            "error": response_text is None,
        })
        if (i + 1) % 10 == 0:
            # Progress against the actual dataset size (was hard-coded "/100").
            print(f" βœ“ {i+1}/{len(scenarios)} μ™„λ£Œ ({cat})")
    # Assemble and write the submission payload.
    submission = {
        "model": model,
        "api_url": api_url,
        "track": "A",  # Text-Only Track
        "submitted_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "total_scenarios": len(scenarios),
        "errors": errors,
        "results": results,
    }
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(submission, f, ensure_ascii=False, indent=2)
    print(f"\nβœ… 제좜 파일 생성: {output_path}")
    print(f" 총 μ‹œλ‚˜λ¦¬μ˜€: {len(scenarios)}, 였λ₯˜: {errors}")
    print(f"\nπŸ“€ λ‹€μŒ 단계: WM Bench Space에 제좜 파일 μ—…λ‘œλ“œ")
    print(f" https://huggingface.co/spaces/FINAL-Bench/worldmodel-bench")
    return output_path
def call_api(api_url: str, api_key: str, model: str, scene_context: dict):
    """Call an OpenAI-compatible chat-completions API once.

    Parameters:
        api_url: endpoint URL
        api_key: bearer token sent in the Authorization header
        model: model name sent in the request payload
        scene_context: scenario dict, JSON-serialized into the user message

    Returns:
        (response_text, latency_ms). ``response_text`` is None on any
        expected failure (network/HTTP error or malformed response body) —
        a deliberate best-effort contract; the caller counts errors.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": model,
        "max_tokens": 200,
        "temperature": 0.0,  # deterministic output for benchmarking
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"scene_context: {json.dumps(scene_context)}"}
        ]
    }
    t0 = time.time()
    try:
        r = requests.post(api_url, headers=headers, json=payload, timeout=30)
        r.raise_for_status()
        text = r.json()["choices"][0]["message"]["content"]
        return text, (time.time() - t0) * 1000
    except (requests.RequestException, KeyError, IndexError, TypeError, ValueError):
        # Catch only expected failures: transport/HTTP errors and a response
        # body that isn't JSON or lacks the choices/message shape. The
        # original bare `except Exception` also swallowed programming bugs.
        return None, (time.time() - t0) * 1000
if __name__ == "__main__":
    # CLI entry point: collect connection details, then run the benchmark.
    cli = argparse.ArgumentParser(description="WM Bench Submission Script")
    cli.add_argument("--api_url", required=True, help="OpenAI-compatible API URL")
    cli.add_argument("--api_key", required=True, help="API Key")
    cli.add_argument("--model", required=True, help="Model name")
    cli.add_argument("--output", default="my_submission.json", help="Output file path")
    opts = cli.parse_args()
    run_evaluation(opts.api_url, opts.api_key, opts.model, opts.output)