Commit · 35c7121
Parent(s): 7165fc4

Update dots-ocr.py with official prompts and simplified design

1 changed file: dots-ocr.py (+322 -489)

dots-ocr.py  CHANGED
@@ -4,30 +4,31 @@
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
 #     "pillow",
-#     "vllm",
-#     "transformers>=4.45.0",
-#     "qwen-vl-utils",
 #     "tqdm",
 #     "toolz",
 #     "torch",
-#     "flash-attn",
 # ]
 #
 # ///
 
 """
 
 Features:
 """
 
 import argparse
@@ -37,37 +38,29 @@ import json
 import logging
 import os
 import sys
-from typing import Any, Dict, List,
 
 import torch
 from datasets import load_dataset
-from huggingface_hub import login
 from PIL import Image
 from toolz import partition_all
 from tqdm.auto import tqdm
-
-# Import both vLLM and transformers - we'll use based on flag
-try:
-    from vllm import LLM, SamplingParams
-    VLLM_AVAILABLE = True
-except ImportError:
-    VLLM_AVAILABLE = False
-
-from transformers import AutoModelForCausalLM, AutoProcessor
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-# Try to import qwen_vl_utils for transformers backend
-try:
-    from qwen_vl_utils import process_vision_info
-    QWEN_VL_AVAILABLE = True
-except ImportError:
-    QWEN_VL_AVAILABLE = False
-    logger.warning("qwen_vl_utils not available, transformers backend may not work properly")
 
     "layout-all": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.
 
 1. Bbox format: [x1, y1, x2, y2]
@@ -84,14 +77,9 @@ PROMPT_MODES = {
     - The output text must be the original text from the image, with no translation.
     - All layout elements must be sorted according to human reading order.
 
-5. Final Output: The entire output must be a single JSON object.
-
     "layout-only": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",
-
-    "ocr": """Extract the text content from this image.""",
-
-    "grounding-ocr": """Extract text from the given bounding box on the image (format: [x1, y1, x2, y2]).\nBounding Box:\n"""
 }
 
 
@@ -105,12 +93,11 @@ def check_cuda_availability():
         logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
 
 
-def
     image: Union[Image.Image, Dict[str, Any], str],
-    bbox: Optional[List[int]] = None,
 ) -> List[Dict]:
-    """Create chat message for
     # Convert to PIL Image if needed
     if isinstance(image, Image.Image):
         pil_img = image
@@ -121,18 +108,14 @@ def make_dots_message(
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
 
     # Convert to base64 data URI
     buf = io.BytesIO()
     pil_img.save(buf, format="PNG")
     data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
 
-    # Get prompt for the specified mode
-    prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])
-
-    # Add bbox for grounding-ocr mode
-    if mode == "grounding-ocr" and bbox:
-        prompt = prompt + str(bbox)
-
     # Return message in vLLM format
     return [
         {
@@ -145,242 +128,145 @@ def make_dots_message(
     ]
 
 
-    elif category == "Table":
-        # Tables are already in HTML format from dots.ocr
-        return f"\n{text}\n"
-    elif category == "Formula":
-        # Formulas are already in LaTeX format
-        return f"\n${text}$\n"
-    elif category == "Picture":
-        # Pictures don't have text in dots.ocr output
-        return "\n![Image]()\n"
-    else:  # Text and any other categories
-        return f"{text}\n"
-
-
-def process_with_transformers(
-    images: List[Union[Image.Image, Dict[str, Any], str]],
-    model,
-    processor,
-    mode: str = "layout-all",
-    max_new_tokens: int = 16384,
-) -> List[str]:
-    """Process images using transformers instead of vLLM."""
-    outputs = []
-
-    for image in tqdm(images, desc="Processing with transformers"):
-        # Convert to PIL Image if needed
-        if isinstance(image, dict) and "bytes" in image:
-            pil_image = Image.open(io.BytesIO(image["bytes"]))
-        elif isinstance(image, str):
-            pil_image = Image.open(image)
-        else:
-            pil_image = image
-
-        # Get prompt for the mode
-        prompt = PROMPT_MODES.get(mode, PROMPT_MODES["layout-all"])
-
-        # Create messages in the format expected by dots.ocr
-        messages = [
-            {
-                "role": "user",
-                "content": [
-                    {"type": "image", "image": pil_image},
-                    {"type": "text", "text": prompt}
-                ]
-            }
-        ]
-
-        # Preparation for inference (following demo code)
-        text = processor.apply_chat_template(
-            messages,
-            tokenize=False,
-            add_generation_prompt=True
-        )
-
-        if QWEN_VL_AVAILABLE:
-            # Use process_vision_info as shown in demo
-            image_inputs, video_inputs = process_vision_info(messages)
-            inputs = processor(
-                text=[text],
-                images=image_inputs,
-                videos=video_inputs,
-                padding=True,
-                return_tensors="pt",
-            )
-        else:
-            # Fallback approach without qwen_vl_utils
-            inputs = processor(
-                text=text,
-                images=[pil_image],
-                return_tensors="pt",
-            )
-
-        inputs = inputs.to(model.device)
-
-        # Generate
-        with torch.no_grad():
-            generated_ids = model.generate(
-                **inputs,
-                max_new_tokens=max_new_tokens,
-                temperature=0.0,
-                do_sample=False,
-            )
-
-        # Decode output (following demo code)
-        generated_ids_trimmed = [
-            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-        ]
-        output_text = processor.batch_decode(
-            generated_ids_trimmed,
-            skip_special_tokens=True,
-            clean_up_tokenization_spaces=False
-        )[0]
-
-        outputs.append(output_text.strip())
-
-    return outputs
 
 
 def main(
     input_dataset: str,
     output_dataset: str,
     image_column: str = "image",
-    output_format: str = "json",
-    filter_category: Optional[str] = None,
-    batch_size: int = 32,
     model: str = "rednote-hilab/dots.ocr",
-    max_model_len: int =
-    max_tokens: int =
     gpu_memory_utilization: float = 0.8,
-    hf_token:
     split: str = "train",
-    max_samples:
     private: bool = False,
-    text_column: str = "layout_texts",
-    markdown_column: str = "markdown",
 ):
-    """Process images from HF dataset through
 
     # Check CUDA availability first
     check_cuda_availability()
 
     # Enable HF_TRANSFER for faster downloads
     os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
@@ -389,6 +275,14 @@ def main(
     if HF_TOKEN:
         login(token=HF_TOKEN)
 
     # Load dataset
     logger.info(f"Loading dataset: {input_dataset}")
     dataset = load_dataset(input_dataset, split=split)
@@ -399,207 +293,176 @@ def main(
             f"Column '{image_column}' not found. Available: {dataset.column_names}"
         )
 
     # Limit samples if requested
     if max_samples:
         dataset = dataset.select(range(min(max_samples, len(dataset))))
         logger.info(f"Limited to {len(dataset)} samples")
 
-        hf_model = AutoModelForCausalLM.from_pretrained(
-            model,
-            torch_dtype=torch.bfloat16,
-            device_map="auto",
-            trust_remote_code=True,
-        )
-        processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)
-
-        logger.info(f"Processing {len(dataset)} images with transformers")
-        logger.info(f"Mode: {mode}, Output format: {output_format}")
-
-        # Process all images
-        all_images = [dataset[i][image_column] for i in range(len(dataset))]
-        raw_outputs = process_with_transformers(
-            all_images,
-            hf_model,
-            processor,
-            mode=mode,
-            max_new_tokens=max_tokens
-        )
-
-        # Parse outputs
-        for raw_text in raw_outputs:
-            parsed = parse_dots_output(raw_text, output_format, filter_category, mode)
-            all_outputs.append(parsed)
-
-    else:
-        # Use vLLM
-        logger.info(f"Initializing vLLM with model: {model}")
-        llm = LLM(
-            model=model,
-            trust_remote_code=True,
-            max_model_len=max_model_len,
-            gpu_memory_utilization=gpu_memory_utilization,
-        )
 
-            partition_all(batch_size, range(len(dataset))),
-            total=(len(dataset) + batch_size - 1) // batch_size,
-            desc="dots.ocr processing",
-        ):
-            batch_indices = list(batch_indices)
-            batch_images = [dataset[i][image_column] for i in batch_indices]
 
         try:
-                logger.error(f"Error processing batch: {e}")
-                # Add error placeholders for failed batch
-                all_outputs.extend([{"error": str(e)}] * len(batch_images))
-
-    # Add columns to dataset based on output format
-    logger.info("Adding output columns to dataset")
-
-    if output_format == "json":
-        dataset = dataset.add_column(output_column, all_outputs)
-
-    elif output_format == "structured":
-        # Extract lists from structured outputs
-        bboxes = []
-        categories = []
-        texts = []
-
-        for output in all_outputs:
-            if isinstance(output, dict) and "error" not in output:
-                bboxes.append(output.get("bboxes", []))
-                categories.append(output.get("categories", []))
-                texts.append(output.get("texts", []))
-            else:
-                bboxes.append([])
-                categories.append([])
-                texts.append([])
-
-        dataset = dataset.add_column(bbox_column, bboxes)
-        dataset = dataset.add_column(category_column, categories)
-        dataset = dataset.add_column(text_column, texts)
-
-    elif output_format == "markdown":
-        dataset = dataset.add_column(markdown_column, all_outputs)
-
-    else:  # ocr mode
-        dataset = dataset.add_column(output_column, all_outputs)
 
     # Push to hub
     logger.info(f"Pushing to {output_dataset}")
     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
 
-    logger.info(
     )
 
 
 if __name__ == "__main__":
     # Show example usage if no arguments
     if len(sys.argv) == 1:
         print("=" * 80)
         print("=" * 80)
-        print("extract layout information, text content, or both.")
         print("\nFeatures:")
         print("\nExample usage:")
-        print("\n6. Structured output with custom columns:")
-        print("   uv run dots-ocr.py docs analyzed \\")
-        print("       --output-format structured \\")
-        print("       --bbox-column boxes \\")
-        print("       --category-column types \\")
-        print("       --text-column content")
-        print("\n7. Process a subset for testing:")
-        print("   uv run dots-ocr.py large-dataset test-output --max-samples 10")
-        print("\n8. Use transformers backend (more compatible):")
-        print("   uv run dots-ocr.py documents analyzed --use-transformers")
-        print("\n9. Running on HF Jobs:")
-        print("   hf jobs run --gpu l4x1 \\")
         print("     -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
-        print("       your-document-dataset \\")
-        print("       your-analyzed-output \\")
-        print("       --use-transformers")
         print("\n" + "=" * 80)
         print("\nFor full help, run: uv run dots-ocr.py --help")
         sys.exit(0)
 
     parser = argparse.ArgumentParser(
-        description="Document
         formatter_class=argparse.RawDescriptionHelpFormatter,
         epilog="""
-Modes:
-  layout-
-  grounding-ocr - Extract text from specific bbox (requires --bbox)
-
-Output Formats:
-  json        - Raw JSON output from model (default)
-  structured  - Separate columns for bboxes, categories, texts
-  markdown    - Convert to markdown format
 
 Examples:
-  # Basic
   uv run dots-ocr.py my-docs analyzed-docs
 
-  uv run dots-ocr.py papers
 
-  # Extract only formulas
-  uv run dots-ocr.py math-docs formulas --filter-category Formula
         """,
     )
@@ -610,29 +473,11 @@ Examples:
         default="image",
         help="Column containing images (default: image)",
     )
-    parser.add_argument(
-        "--mode",
-        choices=["layout-all", "layout-only", "ocr", "grounding-ocr"],
-        default="layout-all",
-        help="Processing mode (default: layout-all)",
-    )
-    parser.add_argument(
-        "--output-format",
-        choices=["json", "structured", "markdown"],
-        default="json",
-        help="Output format (default: json)",
-    )
-    parser.add_argument(
-        "--filter-category",
-        choices=['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer',
-                 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title'],
-        help="Filter results by layout category",
-    )
     parser.add_argument(
         "--batch-size",
         type=int,
-        default=
-        help="Batch size for processing (default:
     )
     parser.add_argument(
         "--model",
@@ -642,14 +487,14 @@
     parser.add_argument(
         "--max-model-len",
         type=int,
-        default=
-        help="Maximum model context length (default:
     )
     parser.add_argument(
         "--max-tokens",
         type=int,
-        default=
-        help="Maximum tokens to generate (default:
     )
     parser.add_argument(
         "--gpu-memory-utilization",
@@ -670,36 +515,28 @@
         "--private", action="store_true", help="Make output dataset private"
     )
     parser.add_argument(
-        action="store_true",
-        help="Use transformers instead of vLLM (more compatible but slower)",
     )
-
-    # Column name customization
     parser.add_argument(
-    parser.add_argument(
-        "--bbox-column",
-        default="layout_bboxes",
-        help="Column name for bboxes in structured mode (default: layout_bboxes)",
     )
     parser.add_argument(
     )
     parser.add_argument(
-        help="Column name for texts in structured mode (default: layout_texts)",
     )
     parser.add_argument(
         default="markdown",
-        help="Column name for
     )
 
     args = parser.parse_args()
| 708 | 
             
                    input_dataset=args.input_dataset,
         | 
| 709 | 
             
                    output_dataset=args.output_dataset,
         | 
| 710 | 
             
                    image_column=args.image_column,
         | 
| 711 | 
            -
                    mode=args.mode,
         | 
| 712 | 
            -
                    output_format=args.output_format,
         | 
| 713 | 
            -
                    filter_category=args.filter_category,
         | 
| 714 | 
             
                    batch_size=args.batch_size,
         | 
| 715 | 
             
                    model=args.model,
         | 
| 716 | 
             
                    max_model_len=args.max_model_len,
         | 
@@ -720,10 +554,9 @@
         split=args.split,
         max_samples=args.max_samples,
         private=args.private,
         output_column=args.output_column,
-        category_column=args.category_column,
-        text_column=args.text_column,
-        markdown_column=args.markdown_column,
-    )
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
 #     "pillow",
+#     "vllm>=0.9.1",
 #     "tqdm",
 #     "toolz",
 #     "torch",
 # ]
 #
 # ///
 
 """
+Convert document images to markdown using DoTS.ocr with vLLM.
+
+DoTS.ocr is a compact 1.7B multilingual document parsing model with SOTA performance
+on 100+ languages. This script uses vLLM for efficient batch processing (2-3x faster
+than native HuggingFace transformers).
+
 Features:
+- Multilingual support (100+ languages)
+- Fast processing with vLLM (2-3x speedup)
+- Table extraction and formatting
+- Formula recognition
+- Layout-aware text extraction
+- Compact model (1.7B parameters)
+
+Model: rednote-hilab/dots.ocr
+vLLM: Officially tested with 0.9.1+ (native support via PR #24645)
 """
 
 import argparse
 import logging
 import os
 import sys
+from typing import Any, Dict, List, Union
+from datetime import datetime
 
 import torch
 from datasets import load_dataset
+from huggingface_hub import DatasetCard, login
 from PIL import Image
 from toolz import partition_all
 from tqdm.auto import tqdm
+from vllm import LLM, SamplingParams
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
+# ────────────────────────────────────────────────────────────────
+# DoTS OCR Prompt Templates (from official dots.ocr repo)
+# Source: https://github.com/rednote-hilab/dots.ocr/blob/master/dots_ocr/utils/prompts.py
+# ────────────────────────────────────────────────────────────────
+
+PROMPT_TEMPLATES = {
+    "ocr": "Extract the text content from this image.",
+
     "layout-all": """Please output the layout information from the PDF image, including each layout element's bbox, its category, and the corresponding text content within the bbox.
 
 1. Bbox format: [x1, y1, x2, y2]
 
     - The output text must be the original text from the image, with no translation.
     - All layout elements must be sorted according to human reading order.
 
+5. Final Output: The entire output must be a single JSON object.""",
+
     "layout-only": """Please output the layout information from this PDF image, including each layout's bbox and its category. The bbox should be in the format [x1, y1, x2, y2]. The layout categories for the PDF document include ['Caption', 'Footnote', 'Formula', 'List-item', 'Page-footer', 'Page-header', 'Picture', 'Section-header', 'Table', 'Text', 'Title']. Do not output the corresponding text. The layout result should be in JSON format.""",
 }
 
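The script's `--prompt-mode` flag (referenced in the reproduction command of the dataset card template further down) selects one of these templates. A minimal sketch of that lookup, assuming a hypothetical `prompt_mode` value parsed from the CLI and a fallback to the plain "ocr" prompt:

```python
# Sketch only: map a --prompt-mode value to its template, falling back to "ocr".
# `prompt_mode` is a hypothetical variable holding the parsed CLI argument.
prompt_mode = "layout-all"
prompt = PROMPT_TEMPLATES.get(prompt_mode, PROMPT_TEMPLATES["ocr"])
print(prompt[:60])
```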
 
         logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
 
 
+def make_ocr_message(
     image: Union[Image.Image, Dict[str, Any], str],
+    prompt: str = PROMPT_TEMPLATES["ocr"],
 ) -> List[Dict]:
+    """Create chat message for OCR processing."""
     # Convert to PIL Image if needed
     if isinstance(image, Image.Image):
         pil_img = image
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
 
+    # Convert to RGB
+    pil_img = pil_img.convert("RGB")
+
     # Convert to base64 data URI
     buf = io.BytesIO()
     pil_img.save(buf, format="PNG")
     data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
 
     # Return message in vLLM format
     return [
         {
     ]
 
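`make_ocr_message` builds a vLLM-style chat conversation whose user turn pairs the image data URI with the OCR prompt. A minimal sketch of how such conversations could be batch-generated with vLLM, assuming `llm.chat` accepts a list of conversations (as in recent vLLM releases) and using hypothetical input images plus defaults visible elsewhere in this diff:

```python
from PIL import Image
from vllm import LLM, SamplingParams

# Sketch only: the model id, temperature 0.0, 16384 max tokens, and 0.8 GPU memory
# utilization mirror values shown elsewhere in this diff; the image paths are hypothetical.
llm = LLM(model="rednote-hilab/dots.ocr", trust_remote_code=True, gpu_memory_utilization=0.8)
sampling_params = SamplingParams(temperature=0.0, max_tokens=16384)

batch_images = [Image.open("page_001.png"), Image.open("page_002.png")]
conversations = [make_ocr_message(img) for img in batch_images]

# One RequestOutput per conversation; the first candidate holds the OCR text.
results = llm.chat(conversations, sampling_params)
markdown = [r.outputs[0].text.strip() for r in results]
```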
| 131 | 
            +
            def create_dataset_card(
         | 
| 132 | 
            +
                source_dataset: str,
         | 
| 133 | 
            +
                model: str,
         | 
| 134 | 
            +
                num_samples: int,
         | 
| 135 | 
            +
                processing_time: str,
         | 
| 136 | 
            +
                batch_size: int,
         | 
| 137 | 
            +
                max_model_len: int,
         | 
| 138 | 
            +
                max_tokens: int,
         | 
| 139 | 
            +
                gpu_memory_utilization: float,
         | 
| 140 | 
            +
                image_column: str = "image",
         | 
| 141 | 
            +
                split: str = "train",
         | 
| 142 | 
            +
                prompt_mode: str = "general",
         | 
| 143 | 
            +
            ) -> str:
         | 
| 144 | 
            +
                """Create a dataset card documenting the OCR process."""
         | 
| 145 | 
            +
                model_name = model.split("/")[-1]
         | 
| 146 | 
            +
             | 
| 147 | 
            +
                return f"""---
         | 
| 148 | 
            +
            viewer: false
         | 
| 149 | 
            +
            tags:
         | 
| 150 | 
            +
            - ocr
         | 
| 151 | 
            +
            - document-processing
         | 
| 152 | 
            +
            - dots-ocr
         | 
| 153 | 
            +
            - multilingual
         | 
| 154 | 
            +
            - markdown
         | 
| 155 | 
            +
            - uv-script
         | 
| 156 | 
            +
            - generated
         | 
| 157 | 
            +
            ---
         | 
| 158 | 
            +
             | 
| 159 | 
            +
            # Document OCR using {model_name}
         | 
| 160 | 
            +
             | 
| 161 | 
            +
            This dataset contains OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using DoTS.ocr, a compact 1.7B multilingual model.
         | 
| 162 | 
            +
             | 
| 163 | 
            +
            ## Processing Details
         | 
| 164 | 
            +
             | 
| 165 | 
            +
            - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
         | 
| 166 | 
            +
            - **Model**: [{model}](https://huggingface.co/{model})
         | 
| 167 | 
            +
            - **Number of Samples**: {num_samples:,}
         | 
| 168 | 
            +
            - **Processing Time**: {processing_time}
         | 
| 169 | 
            +
            - **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
         | 
| 170 | 
            +
             | 
| 171 | 
            +
            ### Configuration
         | 
| 172 | 
            +
             | 
| 173 | 
            +
            - **Image Column**: `{image_column}`
         | 
| 174 | 
            +
            - **Output Column**: `markdown`
         | 
| 175 | 
            +
            - **Dataset Split**: `{split}`
         | 
| 176 | 
            +
            - **Batch Size**: {batch_size}
         | 
| 177 | 
            +
            - **Prompt Mode**: {prompt_mode}
         | 
| 178 | 
            +
            - **Max Model Length**: {max_model_len:,} tokens
         | 
| 179 | 
            +
            - **Max Output Tokens**: {max_tokens:,}
         | 
| 180 | 
            +
            - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
         | 
| 181 | 
            +
             | 
| 182 | 
            +
            ## Model Information
         | 
| 183 | 
            +
             | 
| 184 | 
            +
            DoTS.ocr is a compact multilingual document parsing model that excels at:
         | 
| 185 | 
            +
            - ๐ **100+ Languages** - Multilingual document support
         | 
| 186 | 
            +
            - ๐ **Table extraction** - Structured data recognition
         | 
| 187 | 
            +
            - ๐ **Formulas** - Mathematical notation preservation
         | 
| 188 | 
            +
            - ๐ **Layout-aware** - Reading order and structure preservation
         | 
| 189 | 
            +
            - โก **Fast inference** - 2-3x faster than native HF with vLLM
         | 
| 190 | 
            +
            - ๐ฏ **Compact** - Only 1.7B parameters
         | 
| 191 | 
            +
             | 
| 192 | 
            +
            ## Dataset Structure
         | 
| 193 | 
            +
             | 
| 194 | 
            +
            The dataset contains all original columns plus:
         | 
| 195 | 
            +
            - `markdown`: The extracted text in markdown format
         | 
| 196 | 
            +
            - `inference_info`: JSON list tracking all OCR models applied to this dataset
         | 
| 197 | 
            +
             | 
| 198 | 
            +
            ## Usage
         | 
| 199 | 
            +
             | 
| 200 | 
            +
            ```python
         | 
| 201 | 
            +
            from datasets import load_dataset
         | 
| 202 | 
            +
            import json
         | 
| 203 | 
            +
             | 
| 204 | 
            +
            # Load the dataset
         | 
| 205 | 
            +
            dataset = load_dataset("{{output_dataset_id}}", split="{split}")
         | 
| 206 | 
            +
             | 
| 207 | 
            +
            # Access the markdown text
         | 
| 208 | 
            +
            for example in dataset:
         | 
| 209 | 
            +
                print(example["markdown"])
         | 
| 210 | 
            +
                break
         | 
| 211 | 
            +
             | 
| 212 | 
            +
            # View all OCR models applied to this dataset
         | 
| 213 | 
            +
            inference_info = json.loads(dataset[0]["inference_info"])
         | 
| 214 | 
            +
            for info in inference_info:
         | 
| 215 | 
            +
                print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
         | 
| 216 | 
            +
            ```
         | 
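
            Rows whose batch failed during inference are not dropped; they keep an `[OCR ERROR]` placeholder in the output column. A minimal sketch for filtering them out (assuming the default `markdown` output column):

            ```python
            # Drop rows whose batch failed during OCR (flagged with the "[OCR ERROR]" placeholder)
            clean = dataset.filter(lambda ex: ex["markdown"] != "[OCR ERROR]")
            ```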
| 217 | 
            +
             | 
| 218 | 
            +
            ## Reproduction
         | 
| 219 | 
            +
             | 
| 220 | 
            +
            This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) DoTS OCR script:
         | 
| 221 | 
            +
             | 
| 222 | 
            +
            ```bash
         | 
| 223 | 
            +
            uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr.py \\
         | 
| 224 | 
            +
                {source_dataset} \\
         | 
| 225 | 
            +
                <output-dataset> \\
         | 
| 226 | 
            +
                --image-column {image_column} \\
         | 
| 227 | 
            +
                --batch-size {batch_size} \\
         | 
| 228 | 
            +
                --prompt-mode {prompt_mode} \\
         | 
| 229 | 
            +
                --max-model-len {max_model_len} \\
         | 
| 230 | 
            +
                --max-tokens {max_tokens} \\
         | 
| 231 | 
            +
                --gpu-memory-utilization {gpu_memory_utilization}
         | 
| 232 | 
            +
            ```
         | 
| 233 | 
            +
             | 
| 234 | 
            +
            ## Performance
         | 
| 235 | 
            +
             | 
| 236 | 
            +
            - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
         | 
| 237 | 
            +
            - **GPU Configuration**: vLLM with {gpu_memory_utilization:.0%} GPU memory utilization
         | 
| 238 | 
            +
             | 
| 239 | 
            +
            Generated with 🤗 [UV Scripts](https://huggingface.co/uv-scripts)
         | 
| 240 | 
            +
            """
         | 
| 241 |  | 
| 242 |  | 
| 243 | 
             
            def main(
         | 
| 244 | 
             
                input_dataset: str,
         | 
| 245 | 
             
                output_dataset: str,
         | 
| 246 | 
             
                image_column: str = "image",
         | 
| 247 | 
            +
                batch_size: int = 16,
         | 
|  | |
|  | |
|  | |
| 248 | 
             
                model: str = "rednote-hilab/dots.ocr",
         | 
| 249 | 
            +
                max_model_len: int = 8192,
         | 
| 250 | 
            +
                max_tokens: int = 8192,
         | 
| 251 | 
             
                gpu_memory_utilization: float = 0.8,
         | 
| 252 | 
            +
                hf_token: str = None,
         | 
| 253 | 
             
                split: str = "train",
         | 
| 254 | 
            +
                max_samples: int = None,
         | 
| 255 | 
             
                private: bool = False,
         | 
| 256 | 
            +
                shuffle: bool = False,
         | 
| 257 | 
            +
                seed: int = 42,
         | 
| 258 | 
            +
                prompt_mode: str = "ocr",
         | 
| 259 | 
            +
                custom_prompt: str = None,
         | 
| 260 | 
            +
                output_column: str = "markdown",
         | 
|  | |
|  | |
| 261 | 
             
            ):
         | 
| 262 | 
            +
                """Process images from HF dataset through DoTS.ocr model."""
         | 
| 263 |  | 
| 264 | 
             
                # Check CUDA availability first
         | 
| 265 | 
             
                check_cuda_availability()
         | 
| 266 |  | 
| 267 | 
            +
                # Track processing start time
         | 
| 268 | 
            +
                start_time = datetime.now()
         | 
| 269 | 
            +
             | 
| 270 | 
             
                # Enable HF_TRANSFER for faster downloads
         | 
| 271 | 
             
                os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
         | 
| 272 |  | 
|  | |
| 275 | 
             
                if HF_TOKEN:
         | 
| 276 | 
             
                    login(token=HF_TOKEN)
         | 
| 277 |  | 
| 278 | 
            +
                # Determine prompt to use
         | 
| 279 | 
            +
                if custom_prompt:
         | 
| 280 | 
            +
                    prompt = custom_prompt
         | 
| 281 | 
            +
                    logger.info(f"Using custom prompt: {prompt[:50]}...")
         | 
| 282 | 
            +
                else:
         | 
| 283 | 
            +
                    prompt = PROMPT_TEMPLATES.get(prompt_mode, PROMPT_TEMPLATES["ocr"])
         | 
| 284 | 
            +
                    logger.info(f"Using prompt mode: {prompt_mode}")
         | 
| 285 | 
            +
             | 
| 286 | 
             
                # Load dataset
         | 
| 287 | 
             
                logger.info(f"Loading dataset: {input_dataset}")
         | 
| 288 | 
             
                dataset = load_dataset(input_dataset, split=split)
         | 
|  | |
| 293 | 
             
                        f"Column '{image_column}' not found. Available: {dataset.column_names}"
         | 
| 294 | 
             
                    )
         | 
| 295 |  | 
| 296 | 
            +
                # Shuffle if requested
         | 
| 297 | 
            +
                if shuffle:
         | 
| 298 | 
            +
                    logger.info(f"Shuffling dataset with seed {seed}")
         | 
| 299 | 
            +
                    dataset = dataset.shuffle(seed=seed)
         | 
| 300 | 
            +
             | 
| 301 | 
             
                # Limit samples if requested
         | 
| 302 | 
             
                if max_samples:
         | 
| 303 | 
             
                    dataset = dataset.select(range(min(max_samples, len(dataset))))
         | 
| 304 | 
             
                    logger.info(f"Limited to {len(dataset)} samples")
         | 
| 305 |  | 
| 306 | 
            +
                # Initialize vLLM model
         | 
| 307 | 
            +
                logger.info(f"Initializing vLLM with model: {model}")
         | 
| 308 | 
            +
                logger.info("This may take a few minutes on first run...")
         | 
| 309 | 
            +
                llm = LLM(
         | 
| 310 | 
            +
                    model=model,
         | 
| 311 | 
            +
                    trust_remote_code=True,
         | 
| 312 | 
            +
                    max_model_len=max_model_len,
         | 
| 313 | 
            +
                    gpu_memory_utilization=gpu_memory_utilization,
         | 
| 314 | 
            +
                )
         | 
| 315 |  | 
| 316 | 
            +
                sampling_params = SamplingParams(
         | 
| 317 | 
            +
                    temperature=0.0,  # Deterministic for OCR
         | 
| 318 | 
            +
                    max_tokens=max_tokens,
         | 
| 319 | 
            +
                )
         | 
| 320 |  | 
| 321 | 
            +
                logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
         | 
| 322 | 
            +
                logger.info(f"Output will be written to column: {output_column}")
         | 
| 323 |  | 
| 324 | 
            +
                # Process images in batches
         | 
| 325 | 
            +
                all_outputs = []
         | 
| 326 |  | 
| 327 | 
            +
                for batch_indices in tqdm(
         | 
| 328 | 
            +
                    partition_all(batch_size, range(len(dataset))),
         | 
| 329 | 
            +
                    total=(len(dataset) + batch_size - 1) // batch_size,
         | 
| 330 | 
            +
                    desc="DoTS.ocr processing",
         | 
| 331 | 
            +
                ):
         | 
| 332 | 
            +
                    batch_indices = list(batch_indices)
         | 
| 333 | 
            +
                    batch_images = [dataset[i][image_column] for i in batch_indices]
         | 
| 334 | 
            +
             | 
| 335 | 
            +
                    try:
         | 
| 336 | 
            +
                        # Create messages for batch
         | 
| 337 | 
            +
                        batch_messages = [make_ocr_message(img, prompt) for img in batch_images]
         | 
| 338 | 
            +
             | 
| 339 | 
            +
                        # Process with vLLM
         | 
| 340 | 
            +
                        outputs = llm.chat(batch_messages, sampling_params)
         | 
| 341 | 
            +
             | 
| 342 | 
            +
                        # Extract outputs
         | 
| 343 | 
            +
                        for output in outputs:
         | 
| 344 | 
            +
                            text = output.outputs[0].text.strip()
         | 
| 345 | 
            +
                            all_outputs.append(text)
         | 
| 346 | 
            +
             | 
| 347 | 
            +
                    except Exception as e:
         | 
| 348 | 
            +
                        logger.error(f"Error processing batch: {e}")
         | 
| 349 | 
            +
                        # Add error placeholders for failed batch
         | 
| 350 | 
            +
                        all_outputs.extend(["[OCR ERROR]"] * len(batch_images))
         | 
| 351 | 
            +
             | 
| 352 | 
            +
                # Calculate processing time
         | 
| 353 | 
            +
                processing_duration = datetime.now() - start_time
         | 
| 354 | 
            +
                processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"
         | 
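                # Note: create_dataset_card() parses the leading minute count out of this
                # "<minutes> min" string for its images/second estimate, so keep the two
                # formats in sync if this line changes.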
| 355 | 
            +
             | 
| 356 | 
            +
                # Add output column to dataset
         | 
| 357 | 
            +
                logger.info(f"Adding '{output_column}' column to dataset")
         | 
| 358 | 
            +
                dataset = dataset.add_column(output_column, all_outputs)
         | 
| 359 | 
            +
             | 
| 360 | 
            +
                # Handle inference_info tracking (for multi-model comparisons)
         | 
| 361 | 
            +
                inference_entry = {
         | 
| 362 | 
            +
                    "model_id": model,
         | 
| 363 | 
            +
                    "column_name": output_column,
         | 
| 364 | 
            +
                    "timestamp": datetime.now().isoformat(),
         | 
| 365 | 
            +
                    "prompt_mode": prompt_mode if not custom_prompt else "custom",
         | 
| 366 | 
            +
                }
         | 
| 367 | 
            +
             | 
| 368 | 
            +
                if "inference_info" in dataset.column_names:
         | 
| 369 | 
            +
                    # Append to existing inference info
         | 
| 370 | 
            +
                    logger.info("Updating existing inference_info column")
         | 
| 371 | 
            +
             | 
| 372 | 
            +
                    def update_inference_info(example):
         | 
| 373 | 
             
                        try:
         | 
| 374 | 
            +
                            existing_info = json.loads(example["inference_info"]) if example["inference_info"] else []
         | 
| 375 | 
            +
                        except (json.JSONDecodeError, TypeError):
         | 
| 376 | 
            +
                            existing_info = []
         | 
| 377 | 
            +
             | 
| 378 | 
            +
                        existing_info.append(inference_entry)
         | 
| 379 | 
            +
                        return {"inference_info": json.dumps(existing_info)}
         | 
| 380 | 
            +
             | 
| 381 | 
            +
                    dataset = dataset.map(update_inference_info)
         | 
| 382 | 
            +
                else:
         | 
| 383 | 
            +
                    # Create new inference_info column
         | 
| 384 | 
            +
                    logger.info("Creating new inference_info column")
         | 
| 385 | 
            +
                    inference_list = [json.dumps([inference_entry])] * len(dataset)
         | 
| 386 | 
            +
                    dataset = dataset.add_column("inference_info", inference_list)
         | 
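                # Each cell now holds a JSON-encoded list with one entry per OCR pass over the
                # dataset, e.g. [{"model_id": "rednote-hilab/dots.ocr", "column_name": "markdown",
                #                 "timestamp": "...", "prompt_mode": "ocr"}]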
| 387 |  | 
| 388 | 
             
                # Push to hub
         | 
| 389 | 
             
                logger.info(f"Pushing to {output_dataset}")
         | 
| 390 | 
             
                dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
         | 
| 391 |  | 
| 392 | 
            +
                # Create and push dataset card
         | 
| 393 | 
            +
                logger.info("Creating dataset card")
         | 
| 394 | 
            +
                card_content = create_dataset_card(
         | 
| 395 | 
            +
                    source_dataset=input_dataset,
         | 
| 396 | 
            +
                    model=model,
         | 
| 397 | 
            +
                    num_samples=len(dataset),
         | 
| 398 | 
            +
                    processing_time=processing_time_str,
         | 
| 399 | 
            +
                    batch_size=batch_size,
         | 
| 400 | 
            +
                    max_model_len=max_model_len,
         | 
| 401 | 
            +
                    max_tokens=max_tokens,
         | 
| 402 | 
            +
                    gpu_memory_utilization=gpu_memory_utilization,
         | 
| 403 | 
            +
                    image_column=image_column,
         | 
| 404 | 
            +
                    split=split,
         | 
| 405 | 
            +
                    prompt_mode=prompt_mode if not custom_prompt else "custom",
         | 
| 406 | 
             
                )
         | 
| 407 |  | 
| 408 | 
            +
                card = DatasetCard(card_content)
         | 
| 409 | 
            +
                card.push_to_hub(output_dataset, token=HF_TOKEN)
         | 
| 410 | 
            +
             | 
| 411 | 
            +
                logger.info("โ
 DoTS.ocr processing complete!")
         | 
| 412 | 
            +
                logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
         | 
| 413 | 
            +
                logger.info(f"Processing time: {processing_time_str}")
         | 
| 414 | 
            +
             | 
| 415 |  | 
| 416 | 
             
            if __name__ == "__main__":
         | 
| 417 | 
             
                # Show example usage if no arguments
         | 
| 418 | 
             
                if len(sys.argv) == 1:
         | 
| 419 | 
             
                    print("=" * 80)
         | 
| 420 | 
            +
                    print("DoTS.ocr Document Processing")
         | 
| 421 | 
             
                    print("=" * 80)
         | 
| 422 | 
            +
                    print("\nCompact 1.7B multilingual OCR model supporting 100+ languages")
         | 
|  | |
| 423 | 
             
                    print("\nFeatures:")
         | 
| 424 | 
            +
                    print("- ๐ Multilingual support (100+ languages)")
         | 
| 425 | 
            +
                    print("- โก Fast processing with vLLM (2-3x speedup)")
         | 
| 426 | 
            +
                    print("- ๐ Table extraction and formatting")
         | 
| 427 | 
            +
                    print("- ๐ Formula recognition")
         | 
| 428 | 
            +
                    print("- ๐ Layout-aware text extraction")
         | 
| 429 | 
             
                    print("\nExample usage:")
         | 
| 430 | 
            +
                    print("\n1. Basic OCR:")
         | 
| 431 | 
            +
                    print("   uv run dots-ocr.py input-dataset output-dataset")
         | 
| 432 | 
            +
                    print("\n2. With custom settings:")
         | 
| 433 | 
            +
                    print("   uv run dots-ocr.py docs analyzed-docs --batch-size 20 --max-samples 100")
         | 
| 434 | 
            +
                    print("\n3. Layout analysis with structure:")
         | 
| 435 | 
            +
                    print("   uv run dots-ocr.py papers analyzed-structure --prompt-mode layout-all")
         | 
| 436 | 
            +
                    print("\n4. Layout detection only (no text):")
         | 
| 437 | 
            +
                    print("   uv run dots-ocr.py docs layout-info --prompt-mode layout-only")
         | 
| 438 | 
            +
                    print("\n5. Running on HF Jobs:")
         | 
| 439 | 
            +
                    print("   hf jobs uv run --flavor l4x1 \\")
         | 
| 440 | 
             
                    print("     -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
         | 
| 441 | 
            +
                    print("     -e HF_HUB_ENABLE_HF_TRANSFER=1 \\")
         | 
| 442 | 
            +
                    print("     https://huggingface.co/datasets/uv-scripts/ocr/raw/main/dots-ocr.py \\")
         | 
| 443 | 
            +
                    print("       input-dataset output-dataset")
         | 
|  | |
|  | |
|  | |
| 444 | 
             
                    print("\n" + "=" * 80)
         | 
| 445 | 
             
                    print("\nFor full help, run: uv run dots-ocr.py --help")
         | 
| 446 | 
             
                    sys.exit(0)
         | 
| 447 |  | 
| 448 | 
             
                parser = argparse.ArgumentParser(
         | 
| 449 | 
            +
                    description="Document OCR using DoTS.ocr (1.7B multilingual model)",
         | 
| 450 | 
             
                    formatter_class=argparse.RawDescriptionHelpFormatter,
         | 
| 451 | 
             
                    epilog="""
         | 
| 452 | 
            +
            Prompt Modes (official DoTS.ocr prompts):
         | 
| 453 | 
            +
              ocr         - Simple text extraction (default)
         | 
| 454 | 
            +
              layout-all  - Layout analysis with bboxes, categories, and text (JSON output)
         | 
| 455 | 
            +
              layout-only - Layout detection with bboxes and categories only (JSON output)
         | 
| 456 |  | 
| 457 | 
             
            Examples:
         | 
| 458 | 
            +
              # Basic text OCR (default)
         | 
| 459 | 
             
              uv run dots-ocr.py my-docs analyzed-docs
         | 
| 460 |  | 
| 461 | 
            +
              # Full layout analysis with structure
         | 
| 462 | 
            +
              uv run dots-ocr.py papers structured --prompt-mode layout-all
         | 
| 463 |  | 
| 464 | 
            +
              # Random sampling for testing
         | 
| 465 | 
            +
              uv run dots-ocr.py large-dataset test --max-samples 50 --shuffle
         | 
|  | |
|  | |
|  | |
| 466 | 
             
                    """,
         | 
| 467 | 
             
                )
         | 
| 468 |  | 
|  | |
| 473 | 
             
                    default="image",
         | 
| 474 | 
             
                    help="Column containing images (default: image)",
         | 
| 475 | 
             
                )
         | 
| 476 | 
             
                parser.add_argument(
         | 
| 477 | 
             
                    "--batch-size",
         | 
| 478 | 
             
                    type=int,
         | 
| 479 | 
            +
                    default=16,
         | 
| 480 | 
            +
                    help="Batch size for processing (default: 16, DoTS handles 16-30 well)",
         | 
| 481 | 
             
                )
         | 
| 482 | 
             
                parser.add_argument(
         | 
| 483 | 
             
                    "--model",
         | 
|  | |
| 487 | 
             
                parser.add_argument(
         | 
| 488 | 
             
                    "--max-model-len",
         | 
| 489 | 
             
                    type=int,
         | 
| 490 | 
            +
                    default=8192,
         | 
| 491 | 
            +
                    help="Maximum model context length (default: 8192)",
         | 
| 492 | 
             
                )
         | 
| 493 | 
             
                parser.add_argument(
         | 
| 494 | 
             
                    "--max-tokens",
         | 
| 495 | 
             
                    type=int,
         | 
| 496 | 
            +
                    default=8192,
         | 
| 497 | 
            +
                    help="Maximum tokens to generate (default: 8192)",
         | 
| 498 | 
             
                )
         | 
| 499 | 
             
                parser.add_argument(
         | 
| 500 | 
             
                    "--gpu-memory-utilization",
         | 
|  | |
| 515 | 
             
                    "--private", action="store_true", help="Make output dataset private"
         | 
| 516 | 
             
                )
         | 
| 517 | 
             
                parser.add_argument(
         | 
| 518 | 
            +
                    "--shuffle", action="store_true", help="Shuffle dataset before processing"
         | 
|  | |
|  | |
| 519 | 
             
                )
         | 
|  | |
|  | |
| 520 | 
             
                parser.add_argument(
         | 
| 521 | 
            +
                    "--seed",
         | 
| 522 | 
            +
                    type=int,
         | 
| 523 | 
            +
                    default=42,
         | 
| 524 | 
            +
                    help="Random seed for shuffling (default: 42)",
         | 
|  | |
|  | |
|  | |
|  | |
| 525 | 
             
                )
         | 
| 526 | 
             
                parser.add_argument(
         | 
| 527 | 
            +
                    "--prompt-mode",
         | 
| 528 | 
            +
                    choices=list(PROMPT_TEMPLATES.keys()),
         | 
| 529 | 
            +
                    default="ocr",
         | 
| 530 | 
            +
                    help=f"Prompt template to use: {', '.join(PROMPT_TEMPLATES.keys())} (default: ocr)",
         | 
| 531 | 
             
                )
         | 
| 532 | 
             
                parser.add_argument(
         | 
| 533 | 
            +
                    "--custom-prompt",
         | 
| 534 | 
            +
                    help="Custom prompt text (overrides --prompt-mode)",
         | 
|  | |
| 535 | 
             
                )
         | 
| 536 | 
             
                parser.add_argument(
         | 
| 537 | 
            +
                    "--output-column",
         | 
| 538 | 
             
                    default="markdown",
         | 
| 539 | 
            +
                    help="Column name for output text (default: markdown)",
         | 
| 540 | 
             
                )
         | 
| 541 |  | 
| 542 | 
             
                args = parser.parse_args()
         | 
|  | |
| 545 | 
             
                    input_dataset=args.input_dataset,
         | 
| 546 | 
             
                    output_dataset=args.output_dataset,
         | 
| 547 | 
             
                    image_column=args.image_column,
         | 
|  | |
|  | |
|  | |
| 548 | 
             
                    batch_size=args.batch_size,
         | 
| 549 | 
             
                    model=args.model,
         | 
| 550 | 
             
                    max_model_len=args.max_model_len,
         | 
|  | |
| 554 | 
             
                    split=args.split,
         | 
| 555 | 
             
                    max_samples=args.max_samples,
         | 
| 556 | 
             
                    private=args.private,
         | 
| 557 | 
            +
                    shuffle=args.shuffle,
         | 
| 558 | 
            +
                    seed=args.seed,
         | 
| 559 | 
            +
                    prompt_mode=args.prompt_mode,
         | 
| 560 | 
            +
                    custom_prompt=args.custom_prompt,
         | 
| 561 | 
             
                    output_column=args.output_column,
         | 
| 562 | 
            +
                )
         | 
|  | |
|  | |
|  | |
|  | 

