davanstrien (HF Staff) and Claude committed
Commit fab5e6e · 1 Parent(s): 7593b9a

Add DeepSeek-OCR script (Transformers-based)


Initial implementation using the official Transformers API.
- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
- Sequential processing (no batching)
- Full dataset processing support
- Dataset card generation
- Compatible with HF Jobs

To test before updating README.
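
Example invocation (dataset names are placeholders), mirroring the script's
built-in usage examples:

    uv run deepseek-ocr.py your-document-dataset your-markdown-output

Or on HF Jobs:

    hf jobs uv run --flavor l4x1 --secrets HF_TOKEN \
        https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr.py \
        your-document-dataset your-markdown-output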

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1): deepseek-ocr.py (+586, −0)
deepseek-ocr.py ADDED
@@ -0,0 +1,586 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets",
#     "huggingface-hub[hf_transfer]",
#     "pillow",
#     "torch",
#     "transformers",
#     "tqdm",
# ]
#
# ///

"""
Convert document images to markdown using DeepSeek-OCR with Transformers.

This script processes images through the DeepSeek-OCR model to extract
text and structure as markdown, using the official Transformers API.

Features:
- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
- LaTeX equation recognition
- Table extraction and formatting
- Document structure preservation
- Image grounding and descriptions
- Multilingual support

Note: This script processes images sequentially (no batching) using the
official Transformers API. It is slower than vLLM-based scripts but uses
the well-supported official implementation.
"""

import argparse
import json
import logging
import os
import sys
import tempfile
from datetime import datetime, timezone
from io import BytesIO
from typing import Optional

import torch
from datasets import load_dataset
from huggingface_hub import DatasetCard, login
from PIL import Image
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Resolution mode presets
RESOLUTION_MODES = {
    "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
    "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
    "gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},  # Dynamic resolution
}
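# Approximate vision-token budgets per mode, as documented in the dataset card
# template below: tiny ≈ 64, small ≈ 100, base ≈ 256, large ≈ 400. Gundam tiles
# the page dynamically, so its token count varies with page size.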


def check_cuda_availability():
    """Check if CUDA is available and exit if not."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("Please run on a machine with a CUDA-capable GPU.")
        sys.exit(1)
    else:
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")


def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    resolution_mode: str,
    base_size: int,
    image_size: int,
    crop_mode: bool,
    image_column: str = "image",
    split: str = "train",
) -> str:
    """Create a dataset card documenting the OCR process."""
    model_name = model.split("/")[-1]

    return f"""---
tags:
- ocr
- document-processing
- deepseek
- deepseek-ocr
- markdown
- uv-script
- generated
---

# Document OCR using {model_name}

This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using DeepSeek-OCR.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Resolution Mode**: {resolution_mode}
- **Base Size**: {base_size}
- **Image Size**: {image_size}
- **Crop Mode**: {crop_mode}

## Model Information

DeepSeek-OCR is a state-of-the-art document OCR model that excels at:

- 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
- 📊 **Tables** - Extracted and formatted as HTML/markdown
- 📝 **Document structure** - Headers, lists, and formatting maintained
- 🖼️ **Image grounding** - Spatial layout and bounding box information
- 🔍 **Complex layouts** - Multi-column and hierarchical structures
- 🌍 **Multilingual** - Supports multiple languages

### Resolution Modes

- **Tiny** (512×512): Fast processing, 64 vision tokens
- **Small** (640×640): Balanced speed/quality, 100 vision tokens
- **Base** (1024×1024): High quality, 256 vision tokens
- **Large** (1280×1280): Maximum quality, 400 vision tokens
- **Gundam** (dynamic): Adaptive multi-tile processing for large documents

## Dataset Structure

The dataset contains all original columns plus:

- `markdown`: The extracted text in markdown format with preserved structure
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Usage

```python
from datasets import load_dataset
import json

# Load the dataset
dataset = load_dataset("{{output_dataset_id}}", split="{split}")

# Access the markdown text
for example in dataset:
    print(example["markdown"])
    break

# View all OCR models applied to this dataset
inference_info = json.loads(dataset[0]["inference_info"])
for info in inference_info:
    print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
```

## Reproduction

This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) DeepSeek OCR script:

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr.py \\
    {source_dataset} \\
    <output-dataset> \\
    --resolution-mode {resolution_mode} \\
    --image-column {image_column}
```

## Performance

- **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
- **Processing Method**: Sequential (Transformers API, no batching)

Note: This uses the official Transformers implementation. For faster batch processing,
consider using the vLLM version once DeepSeek-OCR is officially supported by vLLM.

Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
"""


def process_single_image(
    model,
    tokenizer,
    image: Image.Image,
    prompt: str,
    base_size: int,
    image_size: int,
    crop_mode: bool,
) -> str:
    """Process a single image through DeepSeek-OCR."""
    # model.infer expects a file path, so save to a temp file
    # (delete=False so the path can be reopened by the model; removed in finally)
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
        try:
            # Convert to RGB if needed
            if image.mode != "RGB":
                image = image.convert("RGB")

            # Save image
            image.save(tmp.name, format="PNG")

            # Run inference
            result = model.infer(
                tokenizer,
                prompt=prompt,
                image_file=tmp.name,
                output_path="",  # Don't save intermediate files
                base_size=base_size,
                image_size=image_size,
                crop_mode=crop_mode,
                save_results=False,
                test_compress=False,
            )

            return result if isinstance(result, str) else str(result)

        finally:
            # Clean up temp file
            try:
                os.unlink(tmp.name)
            except OSError:
                pass


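# A minimal standalone sketch of the call above (hypothetical file name;
# assumes model_obj and tokenizer are loaded as in main() below):
#
#     markdown = process_single_image(
#         model_obj, tokenizer, Image.open("page.png"),
#         prompt="<image>\n<|grounding|>Convert the document to markdown.",
#         base_size=1024, image_size=640, crop_mode=True,
#     )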
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    model: str = "deepseek-ai/DeepSeek-OCR",
    resolution_mode: str = "gundam",
    base_size: Optional[int] = None,
    image_size: Optional[int] = None,
    crop_mode: Optional[bool] = None,
    prompt: str = "<image>\n<|grounding|>Convert the document to markdown.",
    hf_token: Optional[str] = None,
    split: str = "train",
    max_samples: Optional[int] = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
):
    """Process images from an HF dataset through the DeepSeek-OCR model."""

    # Check CUDA availability first
    check_cuda_availability()

    # Track processing start time
    start_time = datetime.now()

    # Enable HF_TRANSFER for faster downloads
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    # Login to HF if token provided
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Determine resolution settings
    if resolution_mode in RESOLUTION_MODES:
        mode_config = RESOLUTION_MODES[resolution_mode]
        final_base_size = base_size if base_size is not None else mode_config["base_size"]
        final_image_size = image_size if image_size is not None else mode_config["image_size"]
        final_crop_mode = crop_mode if crop_mode is not None else mode_config["crop_mode"]
        logger.info(f"Using resolution mode: {resolution_mode}")
    else:
        # Custom mode - require all parameters
        if base_size is None or image_size is None or crop_mode is None:
            raise ValueError(
                f"Invalid resolution mode '{resolution_mode}'. "
                f"Use one of {list(RESOLUTION_MODES.keys())} or specify "
                f"--base-size, --image-size, and --crop-mode manually."
            )
        final_base_size = base_size
        final_image_size = image_size
        final_crop_mode = crop_mode
        resolution_mode = "custom"

    logger.info(
        f"Resolution: base_size={final_base_size}, "
        f"image_size={final_image_size}, crop_mode={final_crop_mode}"
    )

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    # Validate image column
    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Shuffle if requested
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    # Limit samples if requested
    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize model
    logger.info(f"Loading model: {model}")
    tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True)

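    # Note: flash-attn is not declared in the script dependencies above, so
    # unless it is preinstalled in the environment, the standard-attention
    # fallback below is the path that typically runs.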
    try:
        model_obj = AutoModel.from_pretrained(
            model,
            _attn_implementation="flash_attention_2",
            trust_remote_code=True,
            use_safetensors=True,
        )
    except Exception as e:
        logger.warning(f"Failed to load with flash_attention_2: {e}")
        logger.info("Falling back to standard attention...")
        model_obj = AutoModel.from_pretrained(
            model,
            trust_remote_code=True,
            use_safetensors=True,
        )

    model_obj = model_obj.eval().cuda().to(torch.bfloat16)
    logger.info("Model loaded successfully")

    # Process images sequentially
    all_markdown = []

    logger.info(f"Processing {len(dataset)} images (sequential, no batching)")
    logger.info("Note: This may be slower than vLLM-based scripts")

    for i in tqdm(range(len(dataset)), desc="OCR processing"):
        try:
            image = dataset[i][image_column]

            # Handle different image formats
            if isinstance(image, dict) and "bytes" in image:
                image = Image.open(BytesIO(image["bytes"]))
            elif isinstance(image, str):
                image = Image.open(image)
            elif not isinstance(image, Image.Image):
                raise ValueError(f"Unsupported image type: {type(image)}")

            # Process image
            result = process_single_image(
                model_obj,
                tokenizer,
                image,
                prompt,
                final_base_size,
                final_image_size,
                final_crop_mode,
            )

            all_markdown.append(result)

        except Exception as e:
            logger.error(f"Error processing image {i}: {e}")
            all_markdown.append("[OCR FAILED]")

    # Add markdown column, dropping any leftover column from a previous run
    # so add_column does not raise when re-processing an already-OCR'd dataset
    if "markdown" in dataset.column_names:
        dataset = dataset.remove_columns(["markdown"])
    logger.info("Adding markdown column to dataset")
    dataset = dataset.add_column("markdown", all_markdown)

    # Handle inference_info tracking
    logger.info("Updating inference_info...")

    # Check for existing inference_info
    if "inference_info" in dataset.column_names:
        try:
            existing_info = json.loads(dataset[0]["inference_info"])
            if not isinstance(existing_info, list):
                existing_info = [existing_info]
        except (json.JSONDecodeError, TypeError):
            existing_info = []
        dataset = dataset.remove_columns(["inference_info"])
    else:
        existing_info = []

    # Add new inference info
    new_info = {
        "column_name": "markdown",
        "model_id": model,
        "processing_date": datetime.now().isoformat(),
        "resolution_mode": resolution_mode,
        "base_size": final_base_size,
        "image_size": final_image_size,
        "crop_mode": final_crop_mode,
        "prompt": prompt,
        "script": "deepseek-ocr.py",
        "script_version": "1.0.0",
        "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr.py",
        "implementation": "transformers (sequential)",
    }
    existing_info.append(new_info)

    # Add updated inference_info column
    info_json = json.dumps(existing_info, ensure_ascii=False)
    dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
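    # Each row now stores the same JSON string, e.g. (illustrative, truncated):
    #   [{"column_name": "markdown", "model_id": "deepseek-ai/DeepSeek-OCR", ...}]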

    # Push to hub
    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    # Calculate processing time
    end_time = datetime.now()
    processing_duration = end_time - start_time
    processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"

    # Create and push dataset card
    logger.info("Creating dataset card...")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=model,
        num_samples=len(dataset),
        processing_time=processing_time,
        resolution_mode=resolution_mode,
        base_size=final_base_size,
        image_size=final_image_size,
        crop_mode=final_crop_mode,
        image_column=image_column,
        split=split,
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)
    logger.info("✅ Dataset card created and pushed!")

    logger.info("✅ OCR conversion complete!")
    logger.info(
        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
    )


if __name__ == "__main__":
    # Show example usage if no arguments
    if len(sys.argv) == 1:
        print("=" * 80)
        print("DeepSeek-OCR to Markdown Converter (Transformers)")
        print("=" * 80)
        print("\nThis script converts document images to markdown using")
        print("DeepSeek-OCR with the official Transformers API.")
        print("\nFeatures:")
        print("- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)")
        print("- LaTeX equation recognition")
        print("- Table extraction and formatting")
        print("- Document structure preservation")
        print("- Image grounding and spatial layout")
        print("- Multilingual support")
        print("\nNote: Sequential processing (no batching). Slower than vLLM scripts.")
        print("\nExample usage:")
        print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
        print("   uv run deepseek-ocr.py document-images markdown-docs")
        print("\n2. High quality mode (Large - 1280×1280):")
        print("   uv run deepseek-ocr.py scanned-pdfs extracted-text --resolution-mode large")
        print("\n3. Fast processing (Tiny - 512×512):")
        print("   uv run deepseek-ocr.py quick-test output --resolution-mode tiny")
        print("\n4. Process a subset for testing:")
        print("   uv run deepseek-ocr.py large-dataset test-output --max-samples 10")
        print("\n5. Custom resolution:")
        print("   uv run deepseek-ocr.py dataset output \\")
        print("       --base-size 1024 --image-size 640 --crop-mode")
        print("\n6. Running on HF Jobs:")
        print("   hf jobs uv run --flavor l4x1 \\")
        print("       --secrets HF_TOKEN \\")
        print("       https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr.py \\")
        print("       your-document-dataset \\")
        print("       your-markdown-output")
        print("\n" + "=" * 80)
        print("\nFor full help, run: uv run deepseek-ocr.py --help")
        sys.exit(0)

    parser = argparse.ArgumentParser(
        description="OCR images to markdown using DeepSeek-OCR (Transformers)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Resolution Modes:
  tiny    512×512 pixels, fast processing (64 vision tokens)
  small   640×640 pixels, balanced (100 vision tokens)
  base    1024×1024 pixels, high quality (256 vision tokens)
  large   1280×1280 pixels, maximum quality (400 vision tokens)
  gundam  Dynamic multi-tile processing (adaptive)

Examples:
  # Basic usage with default Gundam mode
  uv run deepseek-ocr.py my-images-dataset ocr-results

  # High quality processing
  uv run deepseek-ocr.py documents extracted-text --resolution-mode large

  # Fast processing for testing
  uv run deepseek-ocr.py dataset output --resolution-mode tiny --max-samples 100

  # Custom resolution settings
  uv run deepseek-ocr.py dataset output --base-size 1024 --image-size 640 --crop-mode
        """,
    )

    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--model",
        default="deepseek-ai/DeepSeek-OCR",
        help="Model to use (default: deepseek-ai/DeepSeek-OCR)",
    )
    parser.add_argument(
        "--resolution-mode",
        default="gundam",
        choices=list(RESOLUTION_MODES.keys()) + ["custom"],
        help="Resolution mode preset (default: gundam)",
    )
    parser.add_argument(
        "--base-size",
        type=int,
        help="Base resolution size (overrides resolution-mode)",
    )
    parser.add_argument(
        "--image-size",
        type=int,
        help="Image tile size (overrides resolution-mode)",
    )
    parser.add_argument(
        "--crop-mode",
        action="store_true",
        help="Enable dynamic multi-tile cropping (overrides resolution-mode)",
    )
    parser.add_argument(
        "--prompt",
        default="<image>\n<|grounding|>Convert the document to markdown.",
        help="Prompt for OCR (default: grounding markdown conversion)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        help="Shuffle the dataset before processing (useful for random sampling)",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )

    args = parser.parse_args()

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        model=args.model,
        resolution_mode=args.resolution_mode,
        base_size=args.base_size,
        image_size=args.image_size,
        # --crop-mode is a store_true flag (False when not given); pass None in
        # that case so the resolution-mode preset decides crop_mode
        crop_mode=args.crop_mode if args.crop_mode else None,
        prompt=args.prompt,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
    )