Commit · 123f376
Parent(s): dace8e2
Fix temp file handling in DeepSeek-OCR
Replace NamedTemporaryFile with simple temp directory approach:
- Create temp dir once at start
- Reuse single temp file path for all images
- Clean up temp dir at end
This matches the official example pattern where model.infer()
expects a real file path string.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
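
For orientation, here is a minimal, self-contained sketch of the lifecycle described above. It is illustrative only, not code from deepseek-ocr.py: `run_ocr` and `images` are stand-ins for the real model call and dataset.

# Minimal sketch of the temp-dir lifecycle (illustrative stand-ins, not the script itself).
import os
import shutil
import tempfile

from PIL import Image

def run_ocr(path: str) -> str:
    # Stand-in for model.infer(...), which expects a real file path string.
    return f"ocr result for {path}"

images = [Image.new("RGB", (32, 32), "white")]  # stand-in for dataset images

temp_dir = tempfile.mkdtemp()                               # create temp dir once at start
temp_image_path = os.path.join(temp_dir, "temp_image.png")
try:
    for image in images:
        image.save(temp_image_path, format="PNG")           # reuse the single temp file path
        print(run_ocr(temp_image_path))
finally:
    shutil.rmtree(temp_dir, ignore_errors=True)             # clean up temp dir at end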
deepseek-ocr.py  CHANGED  (+63 -58)
@@ -40,6 +40,7 @@ import argparse
 import json
 import logging
 import os
+import shutil
 import sys
 import tempfile
 from datetime import datetime

@@ -199,39 +200,30 @@ def process_single_image(
     base_size: int,
     image_size: int,
     crop_mode: bool,
+    temp_image_path: str,
 ) -> str:
     """Process a single image through DeepSeek-OCR."""
-    #
- … (18 removed lines not preserved in this rendering)
-        save_results=False,
-        test_compress=False,
-    )
-
-    return result if isinstance(result, str) else str(result)
+    # Convert to RGB if needed
+    if image.mode != "RGB":
+        image = image.convert("RGB")
+
+    # Save to temp file (model.infer expects a file path)
+    image.save(temp_image_path, format="PNG")
+
+    # Run inference
+    result = model.infer(
+        tokenizer,
+        prompt=prompt,
+        image_file=temp_image_path,
+        output_path="",  # Don't save intermediate files
+        base_size=base_size,
+        image_size=image_size,
+        crop_mode=crop_mode,
+        save_results=False,
+        test_compress=False,
+    )
 
-
-    # Clean up temp file
-    try:
-        os.unlink(tmp.name)
-    except:
-        pass
+    return result if isinstance(result, str) else str(result)
 
 
 def main(

@@ -341,35 +333,48 @@ def main(
     logger.info(f"Processing {len(dataset)} images (sequential, no batching)")
     logger.info("Note: This may be slower than vLLM-based scripts")
 
- … (4 removed lines not preserved in this rendering)
-            # Handle different image formats
-            if isinstance(image, dict) and "bytes" in image:
-                from io import BytesIO
-                image = Image.open(BytesIO(image["bytes"]))
-            elif isinstance(image, str):
-                image = Image.open(image)
-            elif not isinstance(image, Image.Image):
-                raise ValueError(f"Unsupported image type: {type(image)}")
-
-            # Process image
-            result = process_single_image(
-                model_obj,
-                tokenizer,
-                image,
-                prompt,
-                final_base_size,
-                final_image_size,
-                final_crop_mode,
-            )
-
-            all_markdown.append(result)
+    # Create temp directory for image files
+    temp_dir = tempfile.mkdtemp()
+    temp_image_path = os.path.join(temp_dir, "temp_image.png")
 
- … (3 removed lines not preserved in this rendering)
+    try:
+        for i in tqdm(range(len(dataset)), desc="OCR processing"):
+            try:
+                image = dataset[i][image_column]
+
+                # Handle different image formats
+                if isinstance(image, dict) and "bytes" in image:
+                    from io import BytesIO
+                    image = Image.open(BytesIO(image["bytes"]))
+                elif isinstance(image, str):
+                    image = Image.open(image)
+                elif not isinstance(image, Image.Image):
+                    raise ValueError(f"Unsupported image type: {type(image)}")
+
+                # Process image
+                result = process_single_image(
+                    model_obj,
+                    tokenizer,
+                    image,
+                    prompt,
+                    final_base_size,
+                    final_image_size,
+                    final_crop_mode,
+                    temp_image_path,
+                )
+
+                all_markdown.append(result)
+
+            except Exception as e:
+                logger.error(f"Error processing image {i}: {e}")
+                all_markdown.append("[OCR FAILED]")
+
+    finally:
+        # Clean up temp directory
+        try:
+            shutil.rmtree(temp_dir)
+        except:
+            pass
 
     # Add markdown column to dataset
     logger.info("Adding markdown column to dataset")