davanstrien HF Staff commited on
Commit
39e8718
·
verified ·
1 Parent(s): 4e95904

Upload paddleocr-vl-1.5.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. paddleocr-vl-1.5.py +4 -86
paddleocr-vl-1.5.py CHANGED
@@ -45,7 +45,6 @@ import argparse
45
  import io
46
  import json
47
  import logging
48
- import math
49
  import os
50
  import sys
51
  from datetime import datetime
@@ -101,59 +100,6 @@ def check_cuda_availability():
101
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
102
 
103
 
104
- def smart_resize(
105
- height: int,
106
- width: int,
107
- factor: int = 28,
108
- min_pixels: int = 28 * 28 * 130,
109
- max_pixels: int = 28 * 28 * 1280,
110
- ) -> tuple[int, int]:
111
- """
112
- PaddleOCR-VL's intelligent resize logic.
113
-
114
- Rescales the image so that:
115
- 1. Both dimensions are divisible by 'factor' (28)
116
- 2. Total pixels are within [min_pixels, max_pixels]
117
- 3. Aspect ratio is maintained as closely as possible
118
-
119
- Args:
120
- height: Original image height
121
- width: Original image width
122
- factor: Dimension divisibility factor (default: 28)
123
- min_pixels: Minimum total pixels (default: 101,920)
124
- max_pixels: Maximum total pixels (default: 1,003,520)
125
-
126
- Returns:
127
- Tuple of (new_height, new_width)
128
- """
129
- if height < factor:
130
- width = round((width * factor) / height)
131
- height = factor
132
-
133
- if width < factor:
134
- height = round((height * factor) / width)
135
- width = factor
136
-
137
- if max(height, width) / min(height, width) > 200:
138
- logger.warning(
139
- f"Extreme aspect ratio detected: {max(height, width) / min(height, width):.1f}"
140
- )
141
-
142
- h_bar = round(height / factor) * factor
143
- w_bar = round(width / factor) * factor
144
-
145
- if h_bar * w_bar > max_pixels:
146
- beta = math.sqrt((height * width) / max_pixels)
147
- h_bar = math.floor(height / beta / factor) * factor
148
- w_bar = math.floor(width / beta / factor) * factor
149
- elif h_bar * w_bar < min_pixels:
150
- beta = math.sqrt(min_pixels / (height * width))
151
- h_bar = math.ceil(height * beta / factor) * factor
152
- w_bar = math.ceil(width * beta / factor) * factor
153
-
154
- return h_bar, w_bar
155
-
156
-
157
  def prepare_image(
158
  image: Union[Image.Image, Dict[str, Any], str],
159
  ) -> Image.Image:
@@ -212,9 +158,7 @@ def create_dataset_card(
212
  task_mode: str,
213
  num_samples: int,
214
  processing_time: str,
215
- batch_size: int,
216
  max_tokens: int,
217
- apply_smart_resize: bool,
218
  image_column: str = "image",
219
  split: str = "train",
220
  ) -> str:
@@ -249,10 +193,8 @@ This dataset contains {task_mode.upper()} results from images in [{source_datase
249
  - **Image Column**: `{image_column}`
250
  - **Output Column**: `paddleocr_1.5_{task_mode}`
251
  - **Dataset Split**: `{split}`
252
- - **Batch Size**: {batch_size}
253
- - **Smart Resize**: {"Enabled" if apply_smart_resize else "Disabled"}
254
  - **Max Output Tokens**: {max_tokens:,}
255
- - **Backend**: Transformers (batch inference)
256
 
257
  ## Model Information
258
 
@@ -312,8 +254,7 @@ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl-1.5.
312
  {source_dataset} \\
313
  <output-dataset> \\
314
  --task-mode {task_mode} \\
315
- --image-column {image_column} \\
316
- --batch-size {batch_size}
317
  ```
318
 
319
  ## Performance
@@ -321,7 +262,7 @@ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/paddleocr-vl-1.5.
321
  - **Model Size**: 0.9B parameters
322
  - **Benchmark Score**: 94.5% SOTA on OmniDocBench v1.5
323
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.2f} images/second
324
- - **Backend**: Transformers batch inference
325
 
326
  Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
327
  """
@@ -331,10 +272,8 @@ def main(
331
  input_dataset: str,
332
  output_dataset: str,
333
  image_column: str = "image",
334
- batch_size: int = 8,
335
  task_mode: str = "ocr",
336
  max_tokens: int = 512,
337
- apply_smart_resize: bool = True,
338
  hf_token: str = None,
339
  split: str = "train",
340
  max_samples: int = None,
@@ -418,10 +357,8 @@ def main(
418
  # Note: Batch processing with transformers VLMs can be unreliable,
419
  # so we process individually for stability
420
  all_outputs = []
421
- logger.info("Starting image processing loop...")
422
 
423
  for i in tqdm(range(len(dataset)), desc=f"PaddleOCR-VL-1.5 {task_mode.upper()}"):
424
- logger.info(f"Processing image {i+1}/{len(dataset)}...")
425
  try:
426
  # Prepare image and create message
427
  image = dataset[i][image_column]
@@ -456,7 +393,6 @@ def main(
456
  generated_ids = outputs[0, input_len:]
457
  result = processor.decode(generated_ids, skip_special_tokens=True)
458
  all_outputs.append(result.strip())
459
- logger.info(f"Image {i+1} done. Output length: {len(result)} chars")
460
 
461
  except Exception as e:
462
  logger.error(f"Error processing image {i}: {e}")
@@ -479,7 +415,6 @@ def main(
479
  "column_name": output_column,
480
  "timestamp": datetime.now().isoformat(),
481
  "max_tokens": max_tokens,
482
- "smart_resize": apply_smart_resize,
483
  "backend": "transformers",
484
  }
485
 
@@ -519,9 +454,7 @@ def main(
519
  task_mode=task_mode,
520
  num_samples=len(dataset),
521
  processing_time=processing_time_str,
522
- batch_size=batch_size,
523
  max_tokens=max_tokens,
524
- apply_smart_resize=apply_smart_resize,
525
  image_column=image_column,
526
  split=split,
527
  )
@@ -567,9 +500,7 @@ if __name__ == "__main__":
567
  print("\n2. Table extraction:")
568
  print(" uv run paddleocr-vl-1.5.py docs tables-extracted --task-mode table")
569
  print("\n3. Formula recognition:")
570
- print(
571
- " uv run paddleocr-vl-1.5.py papers formulas --task-mode formula --batch-size 16"
572
- )
573
  print("\n4. Text spotting (higher resolution):")
574
  print(" uv run paddleocr-vl-1.5.py images spotted --task-mode spotting")
575
  print("\n5. Seal recognition:")
@@ -636,12 +567,6 @@ Backend: Transformers batch inference (not vLLM)
636
  default="image",
637
  help="Column containing images (default: image)",
638
  )
639
- parser.add_argument(
640
- "--batch-size",
641
- type=int,
642
- default=1,
643
- help="Batch size (currently ignored - images processed one at a time for stability)",
644
- )
645
  parser.add_argument(
646
  "--task-mode",
647
  choices=list(TASK_MODES.keys()),
@@ -654,11 +579,6 @@ Backend: Transformers batch inference (not vLLM)
654
  default=512,
655
  help="Maximum tokens to generate (default: 512)",
656
  )
657
- parser.add_argument(
658
- "--no-smart-resize",
659
- action="store_true",
660
- help="Disable smart resize, use original image size",
661
- )
662
  parser.add_argument("--hf-token", help="Hugging Face API token")
663
  parser.add_argument(
664
  "--split", default="train", help="Dataset split to use (default: train)"
@@ -691,10 +611,8 @@ Backend: Transformers batch inference (not vLLM)
691
  input_dataset=args.input_dataset,
692
  output_dataset=args.output_dataset,
693
  image_column=args.image_column,
694
- batch_size=args.batch_size,
695
  task_mode=args.task_mode,
696
  max_tokens=args.max_tokens,
697
- apply_smart_resize=not args.no_smart_resize,
698
  hf_token=args.hf_token,
699
  split=args.split,
700
  max_samples=args.max_samples,
 
45
  import io
46
  import json
47
  import logging
 
48
  import os
49
  import sys
50
  from datetime import datetime
 
100
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
101
 
102
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  def prepare_image(
104
  image: Union[Image.Image, Dict[str, Any], str],
105
  ) -> Image.Image:
 
158
  task_mode: str,
159
  num_samples: int,
160
  processing_time: str,
 
161
  max_tokens: int,
 
162
  image_column: str = "image",
163
  split: str = "train",
164
  ) -> str:
 
193
  - **Image Column**: `{image_column}`
194
  - **Output Column**: `paddleocr_1.5_{task_mode}`
195
  - **Dataset Split**: `{split}`
 
 
196
  - **Max Output Tokens**: {max_tokens:,}
197
+ - **Backend**: Transformers (single image processing)
198
 
199
  ## Model Information
200
 
 
254
  {source_dataset} \\
255
  <output-dataset> \\
256
  --task-mode {task_mode} \\
257
+ --image-column {image_column}
 
258
  ```
259
 
260
  ## Performance
 
262
  - **Model Size**: 0.9B parameters
263
  - **Benchmark Score**: 94.5% SOTA on OmniDocBench v1.5
264
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.2f} images/second
265
+ - **Backend**: Transformers (single image processing)
266
 
267
  Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
268
  """
 
272
  input_dataset: str,
273
  output_dataset: str,
274
  image_column: str = "image",
 
275
  task_mode: str = "ocr",
276
  max_tokens: int = 512,
 
277
  hf_token: str = None,
278
  split: str = "train",
279
  max_samples: int = None,
 
357
  # Note: Batch processing with transformers VLMs can be unreliable,
358
  # so we process individually for stability
359
  all_outputs = []
 
360
 
361
  for i in tqdm(range(len(dataset)), desc=f"PaddleOCR-VL-1.5 {task_mode.upper()}"):
 
362
  try:
363
  # Prepare image and create message
364
  image = dataset[i][image_column]
 
393
  generated_ids = outputs[0, input_len:]
394
  result = processor.decode(generated_ids, skip_special_tokens=True)
395
  all_outputs.append(result.strip())
 
396
 
397
  except Exception as e:
398
  logger.error(f"Error processing image {i}: {e}")
 
415
  "column_name": output_column,
416
  "timestamp": datetime.now().isoformat(),
417
  "max_tokens": max_tokens,
 
418
  "backend": "transformers",
419
  }
420
 
 
454
  task_mode=task_mode,
455
  num_samples=len(dataset),
456
  processing_time=processing_time_str,
 
457
  max_tokens=max_tokens,
 
458
  image_column=image_column,
459
  split=split,
460
  )
 
500
  print("\n2. Table extraction:")
501
  print(" uv run paddleocr-vl-1.5.py docs tables-extracted --task-mode table")
502
  print("\n3. Formula recognition:")
503
+ print(" uv run paddleocr-vl-1.5.py papers formulas --task-mode formula")
 
 
504
  print("\n4. Text spotting (higher resolution):")
505
  print(" uv run paddleocr-vl-1.5.py images spotted --task-mode spotting")
506
  print("\n5. Seal recognition:")
 
567
  default="image",
568
  help="Column containing images (default: image)",
569
  )
 
 
 
 
 
 
570
  parser.add_argument(
571
  "--task-mode",
572
  choices=list(TASK_MODES.keys()),
 
579
  default=512,
580
  help="Maximum tokens to generate (default: 512)",
581
  )
 
 
 
 
 
582
  parser.add_argument("--hf-token", help="Hugging Face API token")
583
  parser.add_argument(
584
  "--split", default="train", help="Dataset split to use (default: train)"
 
611
  input_dataset=args.input_dataset,
612
  output_dataset=args.output_dataset,
613
  image_column=args.image_column,
 
614
  task_mode=args.task_mode,
615
  max_tokens=args.max_tokens,
 
616
  hf_token=args.hf_token,
617
  split=args.split,
618
  max_samples=args.max_samples,