"""
WM Bench - Example Submission Script
====================================
Any world model can participate in WM Bench using this template.
No 3D environment needed - text input/output only.

Usage:
    python example_submission.py --api_url YOUR_MODEL_API --api_key YOUR_KEY --model YOUR_MODEL_NAME
"""
|
|
| import json |
| import argparse |
| import requests |
| import time |
| from pathlib import Path |
|
|
| |
# Benchmark dataset location, resolved relative to this script's repository
# layout: <repo>/data/wm_bench_dataset.json.
DATASET_PATH = Path(__file__).parent.parent / "data" / "wm_bench_dataset.json"


# System prompt sent with every scenario.  The strict 2-line reply format
# (PREDICT + MOTION) is what the benchmark's scorer expects to parse, so this
# text must not be altered.
SYSTEM_PROMPT = """You are a world model. Given scene_context as JSON, respond in exactly 2 lines:
Line 1: PREDICT: left=<safe|danger>(<reason>), right=<safe|danger>(<reason>), fwd=<safe|danger>(<reason>), back=<safe|danger>(<reason>)
Line 2: MOTION: <describe the character's physical motion and emotional state in one sentence>
Respond ONLY these 2 lines. No explanation."""
|
|
| |
def run_evaluation(api_url: str, api_key: str, model: str, output_path: str = "my_submission.json"):
    """Run the WM Bench evaluation and write a submission file.

    Parameters:
        api_url: OpenAI-compatible chat-completions endpoint
            (e.g. https://api.openai.com/v1/chat/completions).
        api_key: API key, sent as a Bearer token.
        model: Name of the model to evaluate.
        output_path: Path of the JSON submission file to create.

    Returns:
        The path of the submission file that was written.
    """
    with open(DATASET_PATH, "r", encoding="utf-8") as f:
        dataset = json.load(f)

    scenarios = dataset["scenarios"]
    print(f"Dataset loaded: {len(scenarios)} scenarios")
    print(f"Model: {model}")
    print(f"API: {api_url}\n")

    results = []
    errors = 0

    for i, scenario in enumerate(scenarios):
        sc_id = scenario["id"]
        cat = scenario["cat"]
        scene = scenario["scene_context"]

        # call_api measures latency itself, so no timer is started here
        # (the original had a dead `t0 = time.time()` store).
        response_text, latency_ms = call_api(api_url, api_key, model, scene)

        if response_text is None:
            # Best-effort run: record the failure and keep going.
            errors += 1
            print(f"  [ERROR] {sc_id} ({cat}): API error")
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": None,
                "latency_ms": latency_ms,
                "error": True,
            })
        else:
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": response_text,
                "latency_ms": round(latency_ms, 1),
                "error": False,
            })

        if (i + 1) % 10 == 0:
            # Progress is reported against the real dataset size rather than
            # the hard-coded 100 the original printed.
            print(f"  {i + 1}/{len(scenarios)} done ({cat})")

    submission = {
        "model": model,
        "api_url": api_url,
        "track": "A",
        "submitted_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "total_scenarios": len(scenarios),
        "errors": errors,
        "results": results,
    }

    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(submission, f, ensure_ascii=False, indent=2)

    print(f"\nSubmission file written: {output_path}")
    print(f"  Total scenarios: {len(scenarios)}, errors: {errors}")
    print("\nNext step: upload the submission file to the WM Bench Space")
    print("  https://huggingface.co/spaces/FINAL-Bench/worldmodel-bench")
    return output_path
|
|
|
|
def call_api(api_url: str, api_key: str, model: str, scene_context: dict):
    """POST one scenario to an OpenAI-compatible chat-completions endpoint.

    Returns a ``(response_text, latency_ms)`` pair.  ``response_text`` is
    ``None`` when the request fails or the response cannot be parsed — the
    caller counts errors instead of aborting the whole run.
    """
    request_body = {
        "model": model,
        "max_tokens": 200,
        "temperature": 0.0,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"scene_context: {json.dumps(scene_context)}"},
        ],
    }
    auth_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    started = time.time()
    try:
        reply = requests.post(api_url, headers=auth_headers, json=request_body, timeout=30)
        reply.raise_for_status()
        content = reply.json()["choices"][0]["message"]["content"]
    except Exception:
        # Deliberately broad: any transport or parsing failure is reported
        # to the caller as (None, latency) rather than raised.
        return None, (time.time() - started) * 1000
    return content, (time.time() - started) * 1000
|
|
|
|
if __name__ == "__main__":
    # Command-line entry point: collect connection details, then run the bench.
    cli = argparse.ArgumentParser(description="WM Bench Submission Script")
    cli.add_argument("--api_url", required=True, help="OpenAI-compatible API URL")
    cli.add_argument("--api_key", required=True, help="API Key")
    cli.add_argument("--model", required=True, help="Model name")
    cli.add_argument("--output", default="my_submission.json", help="Output file path")
    opts = cli.parse_args()

    run_evaluation(opts.api_url, opts.api_key, opts.model, opts.output)
|
|