davanstrien HF Staff commited on
Commit
fa7a872
·
1 Parent(s): c0663cf

Simplify nanonets-ocr2.py to 3B-only and fix dataset card

Browse files

- Remove 1.5B model support (vLLM compatibility issues)
- Focus on working Nanonets-OCR2-3B model (3.75B params)
- Remove viewer:false from dataset card (allow dataset viewer)
- Set batch size default to 16 (appropriate for 3B model)
- Clean up documentation and help text

🤖 Generated with Claude Code

Files changed (1) hide show
  1. nanonets-ocr2.py +20 -49
nanonets-ocr2.py CHANGED
@@ -13,14 +13,10 @@
13
  # ///
14
 
15
  """
16
- Convert document images to markdown using Nanonets-OCR2 models with vLLM.
17
 
18
- This script processes images through Nanonets-OCR2 models (1.5B or 3B) to extract
19
- text and structure as markdown, ideal for document understanding tasks.
20
-
21
- Models:
22
- - Nanonets-OCR2-3B (default): 3.75B params, best quality
23
- - Nanonets-OCR2-1.5B-exp: 1.65B params, faster processing
24
 
25
  Features:
26
  - LaTeX equation recognition
@@ -110,10 +106,8 @@ def create_dataset_card(
110
  ) -> str:
111
  """Create a dataset card documenting the OCR process."""
112
  model_name = model.split("/")[-1]
113
- model_size = "3B" if "3B" in model else "1.5B"
114
 
115
  return f"""---
116
- viewer: false
117
  tags:
118
  - ocr
119
  - document-processing
@@ -126,13 +120,13 @@ tags:
126
 
127
  # Document OCR using {model_name}
128
 
129
- This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using Nanonets-OCR2-{model_size}.
130
 
131
  ## Processing Details
132
 
133
  - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
134
  - **Model**: [{model}](https://huggingface.co/{model})
135
- - **Model Size**: {model_size} parameters
136
  - **Number of Samples**: {num_samples:,}
137
  - **Processing Time**: {processing_time}
138
  - **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
@@ -149,7 +143,7 @@ This dataset contains markdown-formatted OCR results from images in [{source_dat
149
 
150
  ## Model Information
151
 
152
- Nanonets-OCR2-{model_size} is a state-of-the-art document OCR model that excels at:
153
  - πŸ“ **LaTeX equations** - Mathematical formulas preserved in LaTeX format
154
  - 📊 **Tables** - Extracted and formatted as HTML
155
  - πŸ“ **Document structure** - Headers, lists, and formatting maintained
@@ -214,7 +208,7 @@ def main(
214
  input_dataset: str,
215
  output_dataset: str,
216
  image_column: str = "image",
217
- batch_size: int = None,
218
  model: str = "nanonets/Nanonets-OCR2-3B",
219
  max_model_len: int = 8192,
220
  max_tokens: int = 4096,
@@ -226,16 +220,7 @@ def main(
226
  shuffle: bool = False,
227
  seed: int = 42,
228
  ):
229
- """Process images from HF dataset through Nanonets-OCR2 model."""
230
-
231
- # Auto-set batch size based on model if not specified
232
- if batch_size is None:
233
- if "1.5B" in model:
234
- batch_size = 32
235
- logger.info("Auto-set batch size to 32 for 1.5B model")
236
- else: # 3B model
237
- batch_size = 16
238
- logger.info("Auto-set batch size to 16 for 3B model")
239
 
240
  # Check CUDA availability first
241
  check_cuda_availability()
@@ -395,13 +380,10 @@ if __name__ == "__main__":
395
  # Show example usage if no arguments
396
  if len(sys.argv) == 1:
397
  print("=" * 80)
398
- print("Nanonets OCR2 to Markdown Converter")
399
  print("=" * 80)
400
  print("\nThis script converts document images to structured markdown using")
401
- print("Nanonets-OCR2 models (1.5B or 3B) with vLLM acceleration.")
402
- print("\nModel Options:")
403
- print("- Nanonets-OCR2-3B (default): 3.75B params, best quality")
404
- print("- Nanonets-OCR2-1.5B-exp: 1.65B params, faster processing")
405
  print("\nFeatures:")
406
  print("- LaTeX equation recognition")
407
  print("- Table extraction and formatting (HTML)")
@@ -411,22 +393,19 @@ if __name__ == "__main__":
411
  print("- Checkbox recognition (☐/☑)")
412
  print("- Multilingual support")
413
  print("\nExample usage:")
414
- print("\n1. Basic OCR conversion (3B model, best quality):")
415
  print(" uv run nanonets-ocr2.py document-images markdown-docs")
416
- print("\n2. Fast processing with 1.5B model:")
417
- print(" uv run nanonets-ocr2.py documents output \\")
418
- print(" --model nanonets/Nanonets-OCR2-1.5B-exp")
419
- print("\n3. With custom settings:")
420
  print(" uv run nanonets-ocr2.py scanned-pdfs extracted-text \\")
421
  print(" --image-column page \\")
422
  print(" --batch-size 32 \\")
423
  print(" --gpu-memory-utilization 0.8")
424
- print("\n4. Process a subset for testing:")
425
  print(" uv run nanonets-ocr2.py large-dataset test-output --max-samples 10")
426
- print("\n5. Random sample from ordered dataset:")
427
  print(" uv run nanonets-ocr2.py ordered-dataset random-test \\")
428
  print(" --max-samples 50 --shuffle")
429
- print("\n6. Running on HF Jobs:")
430
  print(" hf jobs uv run --flavor l4x1 \\")
431
  print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
432
  print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\")
@@ -437,20 +416,13 @@ if __name__ == "__main__":
437
  sys.exit(0)
438
 
439
  parser = argparse.ArgumentParser(
440
- description="OCR images to markdown using Nanonets-OCR2 models",
441
  formatter_class=argparse.RawDescriptionHelpFormatter,
442
  epilog="""
443
- Models:
444
- nanonets/Nanonets-OCR2-3B (default) - 3.75B params, best quality
445
- nanonets/Nanonets-OCR2-1.5B-exp - 1.65B params, faster
446
-
447
  Examples:
448
- # Basic usage (3B model)
449
  uv run nanonets-ocr2.py my-images-dataset ocr-results
450
 
451
- # Fast processing with 1.5B model
452
- uv run nanonets-ocr2.py documents output --model nanonets/Nanonets-OCR2-1.5B-exp
453
-
454
  # With specific image column
455
  uv run nanonets-ocr2.py documents extracted-text --image-column scan
456
 
@@ -472,14 +444,13 @@ Examples:
472
  parser.add_argument(
473
  "--batch-size",
474
  type=int,
475
- default=None,
476
- help="Batch size for processing (default: auto - 16 for 3B, 32 for 1.5B)",
477
  )
478
  parser.add_argument(
479
  "--model",
480
  default="nanonets/Nanonets-OCR2-3B",
481
- choices=["nanonets/Nanonets-OCR2-3B", "nanonets/Nanonets-OCR2-1.5B-exp"],
482
- help="Model to use (default: Nanonets-OCR2-3B for best quality)",
483
  )
484
  parser.add_argument(
485
  "--max-model-len",
 
13
  # ///
14
 
15
  """
16
+ Convert document images to markdown using Nanonets-OCR2-3B with vLLM.
17
 
18
+ This script processes images through the Nanonets-OCR2-3B model (3.75B params)
19
+ to extract text and structure as markdown, ideal for document understanding tasks.
 
 
 
 
20
 
21
  Features:
22
  - LaTeX equation recognition
 
106
  ) -> str:
107
  """Create a dataset card documenting the OCR process."""
108
  model_name = model.split("/")[-1]
 
109
 
110
  return f"""---
 
111
  tags:
112
  - ocr
113
  - document-processing
 
120
 
121
  # Document OCR using {model_name}
122
 
123
+ This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using Nanonets-OCR2-3B.
124
 
125
  ## Processing Details
126
 
127
  - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
128
  - **Model**: [{model}](https://huggingface.co/{model})
129
+ - **Model Size**: 3.75B parameters
130
  - **Number of Samples**: {num_samples:,}
131
  - **Processing Time**: {processing_time}
132
  - **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
 
143
 
144
  ## Model Information
145
 
146
+ Nanonets-OCR2-3B is a state-of-the-art document OCR model that excels at:
147
  - πŸ“ **LaTeX equations** - Mathematical formulas preserved in LaTeX format
148
  - 📊 **Tables** - Extracted and formatted as HTML
149
  - πŸ“ **Document structure** - Headers, lists, and formatting maintained
 
208
  input_dataset: str,
209
  output_dataset: str,
210
  image_column: str = "image",
211
+ batch_size: int = 16,
212
  model: str = "nanonets/Nanonets-OCR2-3B",
213
  max_model_len: int = 8192,
214
  max_tokens: int = 4096,
 
220
  shuffle: bool = False,
221
  seed: int = 42,
222
  ):
223
+ """Process images from HF dataset through Nanonets-OCR2-3B model."""
 
 
 
 
 
 
 
 
 
224
 
225
  # Check CUDA availability first
226
  check_cuda_availability()
 
380
  # Show example usage if no arguments
381
  if len(sys.argv) == 1:
382
  print("=" * 80)
383
+ print("Nanonets OCR2-3B to Markdown Converter")
384
  print("=" * 80)
385
  print("\nThis script converts document images to structured markdown using")
386
+ print("the Nanonets-OCR2-3B model (3.75B params) with vLLM acceleration.")
 
 
 
387
  print("\nFeatures:")
388
  print("- LaTeX equation recognition")
389
  print("- Table extraction and formatting (HTML)")
 
393
  print("- Checkbox recognition (☐/☑)")
394
  print("- Multilingual support")
395
  print("\nExample usage:")
396
+ print("\n1. Basic OCR conversion:")
397
  print(" uv run nanonets-ocr2.py document-images markdown-docs")
398
+ print("\n2. With custom settings:")
 
 
 
399
  print(" uv run nanonets-ocr2.py scanned-pdfs extracted-text \\")
400
  print(" --image-column page \\")
401
  print(" --batch-size 32 \\")
402
  print(" --gpu-memory-utilization 0.8")
403
+ print("\n3. Process a subset for testing:")
404
  print(" uv run nanonets-ocr2.py large-dataset test-output --max-samples 10")
405
+ print("\n4. Random sample from ordered dataset:")
406
  print(" uv run nanonets-ocr2.py ordered-dataset random-test \\")
407
  print(" --max-samples 50 --shuffle")
408
+ print("\n5. Running on HF Jobs:")
409
  print(" hf jobs uv run --flavor l4x1 \\")
410
  print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
411
  print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\")
 
416
  sys.exit(0)
417
 
418
  parser = argparse.ArgumentParser(
419
+ description="OCR images to markdown using Nanonets-OCR2-3B",
420
  formatter_class=argparse.RawDescriptionHelpFormatter,
421
  epilog="""
 
 
 
 
422
  Examples:
423
+ # Basic usage
424
  uv run nanonets-ocr2.py my-images-dataset ocr-results
425
 
 
 
 
426
  # With specific image column
427
  uv run nanonets-ocr2.py documents extracted-text --image-column scan
428
 
 
444
  parser.add_argument(
445
  "--batch-size",
446
  type=int,
447
+ default=16,
448
+ help="Batch size for processing (default: 16)",
449
  )
450
  parser.add_argument(
451
  "--model",
452
  default="nanonets/Nanonets-OCR2-3B",
453
+ help="Model to use (default: nanonets/Nanonets-OCR2-3B)",
 
454
  )
455
  parser.add_argument(
456
  "--max-model-len",