davanstrien (HF Staff) and Claude committed
Commit ea32fa8 · 1 Parent(s): a8ad6e1

Critical quality fixes: temperature 0.1 and image resizing to 1288px

Two critical changes to match the official olmOCR implementation:

1. **Temperature 0.1 (was 0.0)**
- Matches olmOCR transformers example
- Their pipeline also starts at 0.1 (not 0.0)
- Temperature 0.0 with sampling can cause quality issues

2. **Image resizing to 1288px on longest edge**
- Official pipeline default: target_longest_image_dim=1288
- Model was trained with images at this resolution
- Ensures consistent token counts and detail level
- Uses LANCZOS resampling for quality

These are the two highest-impact fixes identified by a code review comparing our
implementation to the official olmOCR pipeline and transformers example. A minimal
sketch of both settings follows.
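As a quick illustration (not part of the diff below), this is roughly what the two settings amount to. The 2550x3300 input size and the direct use of vLLM's `SamplingParams` are assumptions for the example; the script builds its sampling parameters from CLI arguments instead.

```python
# Sketch only: arbitrary input size, Pillow and vLLM assumed installed.
from PIL import Image
from vllm import SamplingParams

target_longest_dim = 1288  # olmOCR pipeline default (target_longest_image_dim)

pil_img = Image.new("RGB", (2550, 3300))  # stand-in for a scanned page
scale = target_longest_dim / max(pil_img.size)
pil_img = pil_img.resize(
    (int(pil_img.width * scale), int(pil_img.height * scale)),
    Image.Resampling.LANCZOS,  # LANCZOS keeps text edges sharp when downscaling
)
# -> roughly 995x1288: the longest edge now matches the training resolution

# Temperature 0.1 instead of 0.0, mirroring the olmOCR transformers example
sampling_params = SamplingParams(temperature=0.1, max_tokens=8192)
```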

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

Files changed (1)
  1. olmocr2-vllm.py +23 -6
olmocr2-vllm.py CHANGED
@@ -117,8 +117,15 @@ def parse_yaml_frontmatter(text: str) -> tuple[dict, str]:
 def make_ocr_message(
     image: Union[Image.Image, Dict[str, Any], str],
     prompt: str = OLMOCR_PROMPT,
+    target_longest_dim: int = 1288,
 ) -> List[Dict]:
-    """Create chat message for olmOCR processing."""
+    """Create chat message for olmOCR processing.
+
+    Args:
+        image: Input image (PIL Image, dict with bytes, or path)
+        prompt: OCR prompt text
+        target_longest_dim: Target size for longest image dimension (default 1288, matching olmOCR)
+    """
     # Convert to PIL Image if needed
     if isinstance(image, Image.Image):
         pil_img = image
@@ -129,6 +136,16 @@ def make_ocr_message(
     else:
         raise ValueError(f"Unsupported image type: {type(image)}")
 
+    # Resize image to target dimension (matching olmOCR pipeline default of 1288px)
+    width, height = pil_img.size
+    longest_side = max(width, height)
+    if longest_side != target_longest_dim:
+        scale = target_longest_dim / longest_side
+        new_width = int(width * scale)
+        new_height = int(height * scale)
+        pil_img = pil_img.resize((new_width, new_height), Image.Resampling.LANCZOS)
+        logger.debug(f"Resized image from {width}x{height} to {new_width}x{new_height}")
+
     # Convert to base64 data URI
     buf = io.BytesIO()
     pil_img.save(buf, format="PNG")
@@ -265,7 +282,7 @@ def main(
     model: str = "allenai/olmOCR-2-7B-1025-FP8",
     max_model_len: int = 16384,
     max_tokens: int = 8192,
-    temperature: float = 0.0,
+    temperature: float = 0.1,
     gpu_memory_utilization: float = 0.8,
     guided_decoding: bool = False,
     hf_token: str = None,
@@ -287,7 +304,7 @@ def main(
         model: HuggingFace model ID for olmOCR
         max_model_len: Maximum context length
         max_tokens: Maximum tokens to generate per image
-        temperature: Sampling temperature (0.0 for deterministic)
+        temperature: Sampling temperature (0.1 default, matches olmOCR)
         gpu_memory_utilization: Fraction of GPU memory to use
         guided_decoding: Enable guided decoding with regex for YAML front matter
         hf_token: HuggingFace token for authentication
@@ -340,7 +357,7 @@ def main(
         limit_mm_per_prompt={"image": 1},
     )
 
-    # Sampling parameters - olmOCR uses temperature 0.0 for deterministic output
+    # Sampling parameters - olmOCR uses temperature 0.1 (transformers example)
     sampling_params_kwargs = {
         "temperature": temperature,
         "max_tokens": max_tokens,
@@ -544,8 +561,8 @@ Examples:
     parser.add_argument(
         "--temperature",
         type=float,
-        default=0.0,
-        help="Sampling temperature (default: 0.0 for deterministic output)",
+        default=0.1,
+        help="Sampling temperature (default: 0.1, matches olmOCR transformers example)",
     )
     parser.add_argument(
         "--gpu-memory-utilization",