Commit
·
c588c51
1
Parent(s):
8388345
Change olmOCR2 temperature from 0.1 to 0.0 to match original implementation
Browse files- Update default temperature to 0.0 (was 0.1)
- Matches olmOCR pipeline.py default (build_page_query uses 0.0)
- Should help reduce repetitive generation issues
- Add olmOCR2 documentation to README
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
- olmocr2-vllm.py +5 -5
olmocr2-vllm.py
CHANGED
|
@@ -263,7 +263,7 @@ def main(
|
|
| 263 |
model: str = "allenai/olmOCR-2-7B-1025-FP8",
|
| 264 |
max_model_len: int = 16384,
|
| 265 |
max_tokens: int = 8192,
|
| 266 |
-
temperature: float = 0.1,
|
| 267 |
gpu_memory_utilization: float = 0.8,
|
| 268 |
hf_token: str = None,
|
| 269 |
split: str = "train",
|
|
@@ -283,7 +283,7 @@ def main(
|
|
| 283 |
model: HuggingFace model ID for olmOCR
|
| 284 |
max_model_len: Maximum context length
|
| 285 |
max_tokens: Maximum tokens to generate per image
|
| 286 |
-
temperature: Sampling temperature (0.1 by default)
|
| 287 |
gpu_memory_utilization: Fraction of GPU memory to use
|
| 288 |
hf_token: HuggingFace token for authentication
|
| 289 |
split: Dataset split to process
|
|
@@ -334,7 +334,7 @@ def main(
|
|
| 334 |
limit_mm_per_prompt={"image": 1},
|
| 335 |
)
|
| 336 |
|
| 337 |
-
# Sampling parameters - olmOCR uses temperature 0.1
|
| 338 |
sampling_params = SamplingParams(
|
| 339 |
temperature=temperature,
|
| 340 |
max_tokens=max_tokens,
|
|
@@ -508,8 +508,8 @@ Examples:
|
|
| 508 |
parser.add_argument(
|
| 509 |
"--temperature",
|
| 510 |
type=float,
|
| 511 |
-
default=0.1,
|
| 512 |
-
help="Sampling temperature (default: 0.1)",
|
| 513 |
)
|
| 514 |
parser.add_argument(
|
| 515 |
"--gpu-memory-utilization",
|
|
|
|
| 263 |
model: str = "allenai/olmOCR-2-7B-1025-FP8",
|
| 264 |
max_model_len: int = 16384,
|
| 265 |
max_tokens: int = 8192,
|
| 266 |
+
temperature: float = 0.0,
|
| 267 |
gpu_memory_utilization: float = 0.8,
|
| 268 |
hf_token: str = None,
|
| 269 |
split: str = "train",
|
|
|
|
| 283 |
model: HuggingFace model ID for olmOCR
|
| 284 |
max_model_len: Maximum context length
|
| 285 |
max_tokens: Maximum tokens to generate per image
|
| 286 |
+
temperature: Sampling temperature (0.0 for deterministic)
|
| 287 |
gpu_memory_utilization: Fraction of GPU memory to use
|
| 288 |
hf_token: HuggingFace token for authentication
|
| 289 |
split: Dataset split to process
|
|
|
|
| 334 |
limit_mm_per_prompt={"image": 1},
|
| 335 |
)
|
| 336 |
|
| 337 |
+
# Sampling parameters - olmOCR uses temperature 0.0 for deterministic output
|
| 338 |
sampling_params = SamplingParams(
|
| 339 |
temperature=temperature,
|
| 340 |
max_tokens=max_tokens,
|
|
|
|
| 508 |
parser.add_argument(
|
| 509 |
"--temperature",
|
| 510 |
type=float,
|
| 511 |
+
default=0.0,
|
| 512 |
+
help="Sampling temperature (default: 0.0 for deterministic output)",
|
| 513 |
)
|
| 514 |
parser.add_argument(
|
| 515 |
"--gpu-memory-utilization",
|