Commit
·
5f17daf
1
Parent(s):
79f3d9e
Trim prompt modes to official v2 ones (document, free)
Browse files
DeepSeek-OCR-2 model card only documents two prompts.
Remove image/figure/describe modes carried over from v1.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- deepseek-ocr2-vllm.py +5 -18
deepseek-ocr2-vllm.py
CHANGED
|
@@ -64,13 +64,10 @@ from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor
|
|
| 64 |
logging.basicConfig(level=logging.INFO)
|
| 65 |
logger = logging.getLogger(__name__)
|
| 66 |
|
| 67 |
-
# Prompt mode presets (from DeepSeek-OCR model card)
|
| 68 |
PROMPT_MODES = {
|
| 69 |
"document": "<image>\n<|grounding|>Convert the document to markdown.",
|
| 70 |
-
"image": "<image>\n<|grounding|>OCR this image.",
|
| 71 |
"free": "<image>\nFree OCR.",
|
| 72 |
-
"figure": "<image>\nParse the figure.",
|
| 73 |
-
"describe": "<image>\nDescribe this image in detail.",
|
| 74 |
}
|
| 75 |
|
| 76 |
|
|
@@ -428,17 +425,13 @@ if __name__ == "__main__":
|
|
| 428 |
print("\nExample usage:")
|
| 429 |
print("\n1. Basic OCR conversion (document mode with grounding):")
|
| 430 |
print(" uv run deepseek-ocr2-vllm.py document-images markdown-docs")
|
| 431 |
-
print("\n2. Parse figures from documents:")
|
| 432 |
-
print(
|
| 433 |
-
" uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure"
|
| 434 |
-
)
|
| 435 |
-
print("\n3. Free OCR without layout:")
|
| 436 |
print(" uv run deepseek-ocr2-vllm.py images text --prompt-mode free")
|
| 437 |
-
print("\n4. Process a subset for testing:")
|
| 438 |
print(
|
| 439 |
" uv run deepseek-ocr2-vllm.py large-dataset test-output --max-samples 10"
|
| 440 |
)
|
| 441 |
-
print("\n5. Running on HF Jobs:")
|
| 442 |
print(" hf jobs uv run --flavor l4x1 \\")
|
| 443 |
print(" -s HF_TOKEN \\")
|
| 444 |
print(
|
|
@@ -456,18 +449,12 @@ if __name__ == "__main__":
|
|
| 456 |
epilog="""
|
| 457 |
Prompt Modes:
|
| 458 |
document Convert document to markdown with grounding (default)
|
| 459 |
-
image OCR any image with grounding
|
| 460 |
free Free OCR without layout preservation
|
| 461 |
-
figure Parse figures from documents
|
| 462 |
-
describe Generate detailed image descriptions
|
| 463 |
|
| 464 |
Examples:
|
| 465 |
-
# Basic usage
|
| 466 |
uv run deepseek-ocr2-vllm.py my-images-dataset ocr-results
|
| 467 |
|
| 468 |
-
# Parse figures from a document dataset
|
| 469 |
-
uv run deepseek-ocr2-vllm.py scientific-papers figures --prompt-mode figure
|
| 470 |
-
|
| 471 |
# Free OCR without layout
|
| 472 |
uv run deepseek-ocr2-vllm.py images text --prompt-mode free
|
| 473 |
|
|
|
|
| 64 |
logging.basicConfig(level=logging.INFO)
|
| 65 |
logger = logging.getLogger(__name__)
|
| 66 |
|
| 67 |
+
# Prompt mode presets (from DeepSeek-OCR-2 model card)
|
| 68 |
PROMPT_MODES = {
|
| 69 |
"document": "<image>\n<|grounding|>Convert the document to markdown.",
|
|
|
|
| 70 |
"free": "<image>\nFree OCR.",
|
|
|
|
|
|
|
| 71 |
}
|
| 72 |
|
| 73 |
|
|
|
|
| 425 |
print("\nExample usage:")
|
| 426 |
print("\n1. Basic OCR conversion (document mode with grounding):")
|
| 427 |
print(" uv run deepseek-ocr2-vllm.py document-images markdown-docs")
|
| 428 |
+
print("\n2. Free OCR without layout:")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 429 |
print(" uv run deepseek-ocr2-vllm.py images text --prompt-mode free")
|
| 430 |
+
print("\n3. Process a subset for testing:")
|
| 431 |
print(
|
| 432 |
" uv run deepseek-ocr2-vllm.py large-dataset test-output --max-samples 10"
|
| 433 |
)
|
| 434 |
+
print("\n4. Running on HF Jobs:")
|
| 435 |
print(" hf jobs uv run --flavor l4x1 \\")
|
| 436 |
print(" -s HF_TOKEN \\")
|
| 437 |
print(
|
|
|
|
| 449 |
epilog="""
|
| 450 |
Prompt Modes:
|
| 451 |
document Convert document to markdown with grounding (default)
|
|
|
|
| 452 |
free Free OCR without layout preservation
|
|
|
|
|
|
|
| 453 |
|
| 454 |
Examples:
|
| 455 |
+
# Basic usage (document mode with grounding)
|
| 456 |
uv run deepseek-ocr2-vllm.py my-images-dataset ocr-results
|
| 457 |
|
|
|
|
|
|
|
|
|
|
| 458 |
# Free OCR without layout
|
| 459 |
uv run deepseek-ocr2-vllm.py images text --prompt-mode free
|
| 460 |
|