Commit 5951139
Parent(s): 1825db8

remove flashinfer

nanonets-ocr.py CHANGED (+49 -59)
@@ -5,14 +5,11 @@
 #     "huggingface-hub[hf_transfer]",
 #     "pillow",
 #     "vllm",
-#     "flashinfer-python",
 #     "tqdm",
 #     "toolz",
 #     "torch",  # Added for CUDA check
 # ]
-#
-# [[tool.uv.index]]
-# url = "https://flashinfer.ai/whl/cu121/torch2.4/"
+#
 # ///
 
 """
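This hunk is the heart of the commit: it drops flashinfer-python from the script's inline dependencies and deletes the [[tool.uv.index]] entry that pointed uv at the FlashInfer wheel index. For reference, a sketch of what the PEP 723 header plausibly looks like after the change; the opening lines are assumed, since the hunk starts at line 5 of the block:

# /// script
# dependencies = [      # opening lines assumed; the hunk begins mid-list
#     "huggingface-hub[hf_transfer]",
#     "pillow",
#     "vllm",
#     "tqdm",
#     "toolz",
#     "torch",  # Added for CUDA check
# ]
#
# ///

With the extra index gone, uv run can resolve every dependency from the default PyPI index.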
@@ -72,12 +69,12 @@ def make_ocr_message(
         pil_img = Image.open(image)
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
-
+
     # Convert to base64 data URI
     buf = io.BytesIO()
     pil_img.save(buf, format="PNG")
     data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
-
+
     # Return message in vLLM format
     return [
         {
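make_ocr_message encodes each input image as a base64 PNG data URI and wraps it in an OpenAI-style chat message, the format vLLM's llm.chat() accepts. A minimal sketch of the structure the function plausibly returns; the prompt string and any content entries past the opening brace are assumptions, since the hunk cuts off at the start of the message dict:

# Hypothetical shape of the returned message; the script's actual
# prompt text is not visible in this hunk.
message = [
    {
        "role": "user",
        "content": [
            {"type": "image_url", "image_url": {"url": data_uri}},
            {"type": "text", "text": "Convert this document to markdown."},
        ],
    }
]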
@@ -105,31 +102,33 @@ def main(
     private: bool = False,
 ):
     """Process images from HF dataset through OCR model."""
-
+
     # Check CUDA availability first
     check_cuda_availability()
-
+
     # Enable HF_TRANSFER for faster downloads
     os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
-
+
     # Login to HF if token provided
     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
     if HF_TOKEN:
         login(token=HF_TOKEN)
-
+
     # Load dataset
     logger.info(f"Loading dataset: {input_dataset}")
     dataset = load_dataset(input_dataset, split=split)
-
+
     # Validate image column
     if image_column not in dataset.column_names:
-        raise ValueError(
-            f"Column '{image_column}' not found. Available: {dataset.column_names}")
+        raise ValueError(
+            f"Column '{image_column}' not found. Available: {dataset.column_names}"
+        )
+
     # Limit samples if requested
     if max_samples:
         dataset = dataset.select(range(min(max_samples, len(dataset))))
         logger.info(f"Limited to {len(dataset)} samples")
-
+
     # Initialize vLLM
     logger.info(f"Initializing vLLM with model: {model}")
     llm = LLM(
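The trailing context opens the vLLM engine constructor, and the next hunk shows its last two arguments. A plausible reconstruction of the full setup, with model and max_model_len filled in from the script's CLI defaults rather than from anything visible in this diff:

from vllm import LLM, SamplingParams

# Values mirror the argparse defaults further down (--model,
# --max-model-len, --gpu-memory-utilization, --max-tokens); the exact
# call in the script may differ.
llm = LLM(
    model="nanonets/Nanonets-OCR-s",
    max_model_len=8192,
    gpu_memory_utilization=0.7,
    limit_mm_per_prompt={"image": 1},  # one image per prompt
)
sampling_params = SamplingParams(temperature=0.0, max_tokens=4096)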
@@ -139,53 +138,55 @@ def main(
         gpu_memory_utilization=gpu_memory_utilization,
         limit_mm_per_prompt={"image": 1},
     )
-
+
     sampling_params = SamplingParams(
         temperature=0.0,  # Deterministic for OCR
         max_tokens=max_tokens,
     )
-
+
     # Process images in batches
     all_markdown = []
-
+
     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
-
+
     # Process in batches to avoid memory issues
     for batch_indices in tqdm(
         partition_all(batch_size, range(len(dataset))),
         total=(len(dataset) + batch_size - 1) // batch_size,
-        desc="OCR processing"
+        desc="OCR processing",
     ):
         batch_indices = list(batch_indices)
         batch_images = [dataset[i][image_column] for i in batch_indices]
-
+
         try:
             # Create messages for batch
             batch_messages = [make_ocr_message(img) for img in batch_images]
-
+
             # Process with vLLM
             outputs = llm.chat(batch_messages, sampling_params)
-
+
             # Extract markdown from outputs
             for output in outputs:
                 markdown_text = output.outputs[0].text.strip()
                 all_markdown.append(markdown_text)
-
+
         except Exception as e:
             logger.error(f"Error processing batch: {e}")
             # Add error placeholders for failed batch
             all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
-
+
     # Add markdown column to dataset
     logger.info("Adding markdown column to dataset")
     dataset = dataset.add_column("markdown", all_markdown)
-
+
     # Push to hub
     logger.info(f"Pushing to {output_dataset}")
     dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
-
+
     logger.info("✅ OCR conversion complete!")
-    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
+    logger.info(
+        f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
+    )


 if __name__ == "__main__":
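The loop batches indices with toolz.partition_all, which yields tuples of at most batch_size items, the last one possibly shorter; that is why the total passed to tqdm is the ceiling division (len(dataset) + batch_size - 1) // batch_size. A quick illustration:

from toolz import partition_all

# 8 items in chunks of 3 -> ceil(8 / 3) == 3 chunks, the last one short.
print(list(partition_all(3, range(8))))
# [(0, 1, 2), (3, 4, 5), (6, 7)]

Note also that the [OCR FAILED] placeholders in the except branch keep all_markdown the same length as the dataset, which add_column requires.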
@@ -215,13 +216,15 @@ if __name__ == "__main__":
         print("   hfjobs run \\")
         print("     --flavor l4x1 \\")
         print("     --secret HF_TOKEN=... \\")
-        print("     uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \\")
+        print(
+            "     uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \\"
+        )
         print("       your-document-dataset \\")
         print("       your-markdown-output")
         print("\n" + "=" * 80)
         print("\nFor full help, run: uv run nanonets-ocr.py --help")
         sys.exit(0)
-
+
     parser = argparse.ArgumentParser(
         description="OCR images to markdown using Nanonets-OCR-s",
         formatter_class=argparse.RawDescriptionHelpFormatter,
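Assembled from the print calls above, the usage banner renders roughly this command:

hfjobs run \
  --flavor l4x1 \
  --secret HF_TOKEN=... \
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py \
    your-document-dataset \
    your-markdown-output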
@@ -235,73 +238,60 @@ Examples:
 
  # Process subset for testing
  uv run nanonets-ocr.py large-dataset test-output --max-samples 100
-    """
-)
-
-    parser.add_argument(
-        "input_dataset",
-        help="Input dataset ID from Hugging Face Hub"
-    )
-    parser.add_argument(
-        "output_dataset",
-        help="Output dataset ID for Hugging Face Hub"
+    """,
     )
+
+    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
+    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
     parser.add_argument(
         "--image-column",
         default="image",
-        help="Column containing images (default: image)"
+        help="Column containing images (default: image)",
     )
     parser.add_argument(
         "--batch-size",
         type=int,
         default=8,
-        help="Batch size for processing (default: 8)"
+        help="Batch size for processing (default: 8)",
     )
     parser.add_argument(
         "--model",
         default="nanonets/Nanonets-OCR-s",
-        help="Model to use (default: nanonets/Nanonets-OCR-s)"
+        help="Model to use (default: nanonets/Nanonets-OCR-s)",
     )
     parser.add_argument(
         "--max-model-len",
         type=int,
         default=8192,
-        help="Maximum model context length (default: 8192)"
+        help="Maximum model context length (default: 8192)",
     )
     parser.add_argument(
         "--max-tokens",
         type=int,
         default=4096,
-        help="Maximum tokens to generate (default: 4096)"
+        help="Maximum tokens to generate (default: 4096)",
     )
     parser.add_argument(
         "--gpu-memory-utilization",
         type=float,
         default=0.7,
-        help="GPU memory utilization (default: 0.7)"
-    )
-    parser.add_argument(
-        "--hf-token",
-        help="Hugging Face API token"
+        help="GPU memory utilization (default: 0.7)",
     )
+    parser.add_argument("--hf-token", help="Hugging Face API token")
     parser.add_argument(
-        "--split",
-        default="train",
-        help="Dataset split to use (default: train)"
+        "--split", default="train", help="Dataset split to use (default: train)"
     )
     parser.add_argument(
         "--max-samples",
         type=int,
-        help="Maximum number of samples to process (for testing)"
+        help="Maximum number of samples to process (for testing)",
     )
     parser.add_argument(
-        "--private",
-        action="store_true",
-        help="Make output dataset private"
+        "--private", action="store_true", help="Make output dataset private"
    )
-
+
     args = parser.parse_args()
-
+
     main(
         input_dataset=args.input_dataset,
         output_dataset=args.output_dataset,
@@ -315,4 +305,4 @@ Examples:
         split=args.split,
         max_samples=args.max_samples,
         private=args.private,
-    )
+    )
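The commit does not change what the script produces: each image row still gains a markdown column, and the result is still pushed to the Hub. A quick way to inspect the output, with "your-markdown-output" standing in for whatever output_dataset ID was used:

from datasets import load_dataset

# "your-markdown-output" is a placeholder for the output_dataset ID
# passed to the script.
ds = load_dataset("your-markdown-output", split="train")
print(ds.column_names)          # original columns plus "markdown"
print(ds[0]["markdown"][:200])  # first 200 characters of the OCR text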