Commit 9e98db3 · Parent: ee028d2
Add repetition penalty, guided decoding, and finish_reason logging
Three improvements to address output quality issues:
1. **Repetition penalty (1.05)** - Discourages repetitive output like "ACA ACA ACA" (see the sketch after this list)
2. **Guided decoding (--guided-decoding flag)** - Optional regex enforcement of YAML front matter structure using GuidedDecodingParams
3. **Finish reason logging** - Warns when generation doesn't complete naturally (e.g., hits max_tokens)
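As a rough illustration of item 1: vLLM applies a CTRL-style repetition penalty, rescaling the logit of any token that has already appeared in the prompt or output. A minimal sketch, assuming that rule (the logit values are illustrative):

```python
# Minimal sketch of repetition_penalty=1.05, assuming vLLM's CTRL-style rule:
# logits of already-seen tokens are divided by the penalty when positive and
# multiplied by it when negative, so repeats become slightly less likely.
penalty = 1.05

def penalize(logit: float) -> float:
    return logit / penalty if logit > 0 else logit * penalty

print(penalize(3.0))   # ~2.857 - a repeated token's score shrinks a bit
print(penalize(-1.0))  # -1.05  - an already-unlikely repeat gets less likely
```

At 1.05 the effect is gentle: enough to break "ACA ACA ACA" loops without distorting normal prose.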
Usage:

```bash
# With guided decoding enabled
uv run olmocr2-vllm.py input output --guided-decoding
```
Matches olmOCR pipeline implementation for better quality and reliability.
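For reference, a hypothetical response that the guided-decoding regex in the diff below accepts (field values are illustrative, not from a real run):

```yaml
---
primary_language: en
is_rotation_valid: true
rotation_correction: 0
is_table: false
is_diagram: false
---
Extracted page text follows the front matter...
```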
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
olmocr2-vllm.py CHANGED (+32 -6)
```diff
@@ -51,6 +51,7 @@ from PIL import Image
 from toolz import partition_all
 from tqdm.auto import tqdm
 from vllm import LLM, SamplingParams
+from vllm.sampling_params import GuidedDecodingParams
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -266,6 +267,7 @@ def main(
     max_tokens: int = 8192,
     temperature: float = 0.0,
     gpu_memory_utilization: float = 0.8,
+    guided_decoding: bool = False,
     hf_token: str = None,
     split: str = "train",
     max_samples: int = None,
@@ -287,6 +289,7 @@ def main(
         max_tokens: Maximum tokens to generate per image
         temperature: Sampling temperature (0.0 for deterministic)
         gpu_memory_utilization: Fraction of GPU memory to use
+        guided_decoding: Enable guided decoding with regex for YAML front matter
         hf_token: HuggingFace token for authentication
         split: Dataset split to process
         max_samples: Limit number of samples (for testing)
```
```diff
@@ -338,11 +341,22 @@ def main(
     )
 
     # Sampling parameters - olmOCR uses temperature 0.0 for deterministic output
-    sampling_params = SamplingParams(
-        temperature=temperature,
-        max_tokens=max_tokens,
-        stop=["<|im_end|>", "<|endoftext|>"],
-    )
+    sampling_params_kwargs = {
+        "temperature": temperature,
+        "max_tokens": max_tokens,
+        "repetition_penalty": 1.05,  # Discourage repetitive output
+        "stop": ["<|im_end|>", "<|endoftext|>"],
+    }
+
+    # Add guided decoding if requested (enforces YAML front matter structure)
+    if guided_decoding:
+        logger.info("Enabling guided decoding with YAML front matter regex")
+        guided_params = GuidedDecodingParams(
+            regex=r"---\nprimary_language: (?:[a-z]{2}|null)\nis_rotation_valid: (?:True|False|true|false)\nrotation_correction: (?:0|90|180|270)\nis_table: (?:True|False|true|false)\nis_diagram: (?:True|False|true|false)\n(?:---|---\n[\s\S]+)"
+        )
+        sampling_params_kwargs["guided_decoding"] = guided_params
+
+    sampling_params = SamplingParams(**sampling_params_kwargs)
 
     # Process in batches
     all_outputs = []
```
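Since a structural regex like this is easy to get subtly wrong, here is a quick standalone check (a hypothetical test, not part of the commit) that a well-formed response matches the pattern added above:

```python
import re

# Pattern copied verbatim from the diff above, split for readability.
FRONT_MATTER_RE = re.compile(
    r"---\nprimary_language: (?:[a-z]{2}|null)\n"
    r"is_rotation_valid: (?:True|False|true|false)\n"
    r"rotation_correction: (?:0|90|180|270)\n"
    r"is_table: (?:True|False|true|false)\n"
    r"is_diagram: (?:True|False|true|false)\n"
    r"(?:---|---\n[\s\S]+)"
)

sample = (
    "---\n"
    "primary_language: en\n"
    "is_rotation_valid: true\n"
    "rotation_correction: 0\n"
    "is_table: false\n"
    "is_diagram: false\n"
    "---\n"
    "Extracted page text...\n"
)

# match() anchors at the start of the string, mirroring how guided decoding
# constrains generation from the first token onward.
assert FRONT_MATTER_RE.match(sample) is not None
```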
```diff
@@ -359,8 +373,14 @@ def main(
         outputs = llm.chat(messages, sampling_params=sampling_params)
 
         # Extract text and parse YAML front matter
-        for output in outputs:
+        for idx, output in enumerate(outputs):
             response_text = output.outputs[0].text
+            finish_reason = output.outputs[0].finish_reason
+
+            # Log warning if generation didn't finish naturally
+            if finish_reason != "stop":
+                logger.warning(f"Generation did not finish naturally (reason: {finish_reason}), output may be incomplete")
+
             metadata, content = parse_yaml_frontmatter(response_text)
             all_outputs.append(content)
             all_metadata.append(json.dumps(metadata))
```
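In vLLM, `finish_reason` on a `CompletionOutput` is normally `"stop"` (a stop string or EOS was produced) or `"length"` (the `max_tokens` budget ran out). A hypothetical follow-up to the warning above, not part of this commit, that turns the reason into a retry decision:

```python
def needs_retry(finish_reason: str | None) -> bool:
    # "length" means the model hit max_tokens and the transcription was
    # likely truncated; anything other than a clean "stop" is worth
    # reprocessing (e.g., with a larger max_tokens) or flagging for review.
    return finish_reason != "stop"
```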
```diff
@@ -533,6 +553,11 @@ Examples:
         default=0.8,
         help="GPU memory utilization (default: 0.8)",
     )
+    parser.add_argument(
+        "--guided-decoding",
+        action="store_true",
+        help="Enable guided decoding with regex for YAML front matter structure",
+    )
     parser.add_argument(
         "--hf-token",
         help="HuggingFace token (or set HF_TOKEN env var)",
@@ -576,6 +601,7 @@ Examples:
         max_tokens=args.max_tokens,
         temperature=args.temperature,
         gpu_memory_utilization=args.gpu_memory_utilization,
+        guided_decoding=args.guided_decoding,
         hf_token=args.hf_token,
         split=args.split,
         max_samples=args.max_samples,
```