davanstrien HF Staff commited on
Commit
c0663cf
·
1 Parent(s): 90ace90

Add Nanonets-OCR2 script with dual model support (1.5B/3B)

- Supports both Nanonets-OCR2-3B (3.75B params, best quality) and Nanonets-OCR2-1.5B-exp (1.65B params, faster)
- Auto-adjusts batch size based on model selection (16 for 3B, 32 for 1.5B; see the sketch below)
- Same prompts and capabilities across both models
- LaTeX equations, HTML tables, image captions, watermarks, checkboxes
- Multilingual support
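
The batch-size auto-selection is just a substring check on the model ID. A minimal sketch of the rule as implemented in the script (`default_batch_size` is an illustrative name; the script inlines this logic in `main()`):

```python
def default_batch_size(model_id: str) -> int:
    # The smaller 1.5B checkpoint leaves headroom for larger batches;
    # anything else gets the conservative 3B default.
    return 32 if "1.5B" in model_id else 16
```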

🤖 Generated with Claude Code

Files changed (1)
  1. nanonets-ocr2.py +543 -0
nanonets-ocr2.py ADDED
@@ -0,0 +1,543 @@
+ # /// script
+ # requires-python = ">=3.11"
+ # dependencies = [
+ #     "datasets",
+ #     "huggingface-hub[hf_transfer]",
+ #     "pillow",
+ #     "vllm",
+ #     "tqdm",
+ #     "toolz",
+ #     "torch",
+ # ]
+ #
+ # ///
+
+ """
+ Convert document images to markdown using Nanonets-OCR2 models with vLLM.
+
+ This script processes images through Nanonets-OCR2 models (1.5B or 3B) to extract
+ text and structure as markdown, ideal for document understanding tasks.
+
+ Models:
+ - Nanonets-OCR2-3B (default): 3.75B params, best quality
+ - Nanonets-OCR2-1.5B-exp: 1.65B params, faster processing
+
+ Features:
+ - LaTeX equation recognition
+ - Table extraction and formatting (HTML)
+ - Document structure preservation
+ - Image descriptions and captions
+ - Signature and watermark detection
+ - Checkbox recognition
+ - Multilingual support
+ """
+
+ import argparse
+ import base64
+ import io
+ import json
+ import logging
+ import os
+ import sys
+ from typing import Any, Dict, List, Optional, Union
+ from datetime import datetime
+
+ import torch
+ from datasets import load_dataset
+ from huggingface_hub import DatasetCard, login
+ from PIL import Image
+ from toolz import partition_all
+ from tqdm.auto import tqdm
+ from vllm import LLM, SamplingParams
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def check_cuda_availability():
+     """Check if CUDA is available and exit if not."""
+     if not torch.cuda.is_available():
+         logger.error("CUDA is not available. This script requires a GPU.")
+         logger.error("Please run on a machine with a CUDA-capable GPU.")
+         sys.exit(1)
+     else:
+         logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
+
+
+ def make_ocr_message(
+     image: Union[Image.Image, Dict[str, Any], str],
+     prompt: str = "Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation. If there is an image in the document and image caption is not present, add a small description of the image inside the <img></img> tag; otherwise, add the image caption inside <img></img>. Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>. Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number> or <page_number>9/22</page_number>. Prefer using ☐ and ☑ for check boxes.",
+ ) -> List[Dict]:
+     """Create chat message for OCR processing."""
+     # Convert to PIL Image if needed
+     if isinstance(image, Image.Image):
+         pil_img = image
+     elif isinstance(image, dict) and "bytes" in image:
+         pil_img = Image.open(io.BytesIO(image["bytes"]))
+     elif isinstance(image, str):
+         pil_img = Image.open(image)
+     else:
+         raise ValueError(f"Unsupported image type: {type(image)}")
+
+     # Convert to base64 data URI
+     buf = io.BytesIO()
+     pil_img.save(buf, format="PNG")
+     data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
+
+     # Return message in vLLM format
+     return [
+         {
+             "role": "user",
+             "content": [
+                 {"type": "image_url", "image_url": {"url": data_uri}},
+                 {"type": "text", "text": prompt},
+             ],
+         }
+     ]
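+
+ # Illustrative use of make_ocr_message (hypothetical file path), mirroring how
+ # the batch loop in main() below calls vLLM:
+ #     messages = make_ocr_message("page_001.png")
+ #     output = llm.chat([messages], sampling_params)[0]
+ #     markdown = output.outputs[0].text.strip()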
+
+
+ def create_dataset_card(
+     source_dataset: str,
+     model: str,
+     num_samples: int,
+     processing_time: str,
+     batch_size: int,
+     max_model_len: int,
+     max_tokens: int,
+     gpu_memory_utilization: float,
+     image_column: str = "image",
+     split: str = "train",
+ ) -> str:
+     """Create a dataset card documenting the OCR process."""
+     model_name = model.split("/")[-1]
+     model_size = "3B" if "3B" in model else "1.5B"
+
+     return f"""---
+ viewer: false
+ tags:
+ - ocr
+ - document-processing
+ - nanonets
+ - nanonets-ocr2
+ - markdown
+ - uv-script
+ - generated
+ ---
+
+ # Document OCR using {model_name}
+
+ This dataset contains markdown-formatted OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using Nanonets-OCR2-{model_size}.
+
+ ## Processing Details
+
+ - **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
+ - **Model**: [{model}](https://huggingface.co/{model})
+ - **Model Size**: {model_size} parameters
+ - **Number of Samples**: {num_samples:,}
+ - **Processing Time**: {processing_time}
+ - **Processing Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
+
+ ### Configuration
+
+ - **Image Column**: `{image_column}`
+ - **Output Column**: `markdown`
+ - **Dataset Split**: `{split}`
+ - **Batch Size**: {batch_size}
+ - **Max Model Length**: {max_model_len:,} tokens
+ - **Max Output Tokens**: {max_tokens:,}
+ - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
+
+ ## Model Information
+
+ Nanonets-OCR2-{model_size} is a state-of-the-art document OCR model that excels at:
+ - 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
+ - 📊 **Tables** - Extracted and formatted as HTML
+ - 📝 **Document structure** - Headers, lists, and formatting maintained
+ - 🖼️ **Images** - Captions and descriptions included in `<img>` tags
+ - ☑️ **Forms** - Checkboxes rendered as ☐/☑
+ - 🔖 **Watermarks** - Wrapped in `<watermark>` tags
+ - 📄 **Page numbers** - Wrapped in `<page_number>` tags
+ - 🌍 **Multilingual** - Supports multiple languages
+
+ ## Dataset Structure
+
+ The dataset contains all original columns plus:
+ - `markdown`: The extracted text in markdown format with preserved structure
+ - `inference_info`: JSON list tracking all OCR models applied to this dataset
+
+ ## Usage
+
+ ```python
+ from datasets import load_dataset
+ import json
+
+ # Load the dataset
+ dataset = load_dataset("{{output_dataset_id}}", split="{split}")
+
+ # Access the markdown text
+ for example in dataset:
+     print(example["markdown"])
+     break
+
+ # View all OCR models applied to this dataset
+ inference_info = json.loads(dataset[0]["inference_info"])
+ for info in inference_info:
+     print(f"Column: {{info['column_name']}} - Model: {{info['model_id']}}")
+ ```
+
+ ## Reproduction
+
+ This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/datasets/uv-scripts/ocr) Nanonets OCR2 script:
+
+ ```bash
+ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\
+     {source_dataset} \\
+     <output-dataset> \\
+     --model {model} \\
+     --image-column {image_column} \\
+     --batch-size {batch_size} \\
+     --max-model-len {max_model_len} \\
+     --max-tokens {max_tokens} \\
+     --gpu-memory-utilization {gpu_memory_utilization}
+ ```
+
+ ## Performance
+
+ - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
+ - **GPU Configuration**: vLLM with {gpu_memory_utilization:.0%} GPU memory utilization
+
+ Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
+ """
+
+
+ def main(
+     input_dataset: str,
+     output_dataset: str,
+     image_column: str = "image",
+     batch_size: Optional[int] = None,
+     model: str = "nanonets/Nanonets-OCR2-3B",
+     max_model_len: int = 8192,
+     max_tokens: int = 4096,
+     gpu_memory_utilization: float = 0.8,
+     hf_token: Optional[str] = None,
+     split: str = "train",
+     max_samples: Optional[int] = None,
+     private: bool = False,
+     shuffle: bool = False,
+     seed: int = 42,
+ ):
+     """Process images from HF dataset through Nanonets-OCR2 model."""
+
+     # Auto-set batch size based on model if not specified
+     if batch_size is None:
+         if "1.5B" in model:
+             batch_size = 32
+             logger.info("Auto-set batch size to 32 for 1.5B model")
+         else:  # 3B model
+             batch_size = 16
+             logger.info("Auto-set batch size to 16 for 3B model")
+
+     # Check CUDA availability first
+     check_cuda_availability()
+
+     # Track processing start time
+     start_time = datetime.now()
+
+     # Enable HF_TRANSFER for faster downloads
+     os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+
+     # Login to HF if token provided
+     HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
+     if HF_TOKEN:
+         login(token=HF_TOKEN)
+
+     # Load dataset
+     logger.info(f"Loading dataset: {input_dataset}")
+     dataset = load_dataset(input_dataset, split=split)
+
+     # Validate image column
+     if image_column not in dataset.column_names:
+         raise ValueError(
+             f"Column '{image_column}' not found. Available: {dataset.column_names}"
+         )
+
+     # Shuffle if requested
+     if shuffle:
+         logger.info(f"Shuffling dataset with seed {seed}")
+         dataset = dataset.shuffle(seed=seed)
+
+     # Limit samples if requested
+     if max_samples:
+         dataset = dataset.select(range(min(max_samples, len(dataset))))
+         logger.info(f"Limited to {len(dataset)} samples")
+
+     # Initialize vLLM
+     logger.info(f"Initializing vLLM with model: {model}")
+     llm = LLM(
+         model=model,
+         trust_remote_code=True,
+         max_model_len=max_model_len,
+         gpu_memory_utilization=gpu_memory_utilization,
+         limit_mm_per_prompt={"image": 1},
+     )
+
+     sampling_params = SamplingParams(
+         temperature=0.0,  # Deterministic for OCR
+         max_tokens=max_tokens,
+     )
+
+     # Process images in batches
+     all_markdown = []
+
+     logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
+
+     # Process in batches to avoid memory issues
+     for batch_indices in tqdm(
+         partition_all(batch_size, range(len(dataset))),
+         total=(len(dataset) + batch_size - 1) // batch_size,
+         desc="OCR processing",
+     ):
+         batch_indices = list(batch_indices)
+         batch_images = [dataset[i][image_column] for i in batch_indices]
+
+         try:
+             # Create messages for batch
+             batch_messages = [make_ocr_message(img) for img in batch_images]
+
+             # Process with vLLM
+             outputs = llm.chat(batch_messages, sampling_params)
+
+             # Extract markdown from outputs
+             for output in outputs:
+                 markdown_text = output.outputs[0].text.strip()
+                 all_markdown.append(markdown_text)
+
+         except Exception as e:
+             logger.error(f"Error processing batch: {e}")
+             # Add error placeholders for failed batch
+             all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
+
+     # Add markdown column to dataset
+     logger.info("Adding markdown column to dataset")
+     dataset = dataset.add_column("markdown", all_markdown)
+
+     # Handle inference_info tracking
+     logger.info("Updating inference_info...")
+
+     # Check for existing inference_info
+     if "inference_info" in dataset.column_names:
+         # Parse existing info from first row (all rows have same info)
+         try:
+             existing_info = json.loads(dataset[0]["inference_info"])
+             if not isinstance(existing_info, list):
+                 existing_info = [existing_info]  # Convert old format to list
+         except (json.JSONDecodeError, TypeError):
+             existing_info = []
+         # Remove old column to update it
+         dataset = dataset.remove_columns(["inference_info"])
+     else:
+         existing_info = []
+
+     # Add new inference info
+     new_info = {
+         "column_name": "markdown",
+         "model_id": model,
+         "processing_date": datetime.now().isoformat(),
+         "batch_size": batch_size,
+         "max_tokens": max_tokens,
+         "gpu_memory_utilization": gpu_memory_utilization,
+         "max_model_len": max_model_len,
+         "script": "nanonets-ocr2.py",
+         "script_version": "1.0.0",
+         "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py",
+     }
+     existing_info.append(new_info)
+
+     # Add updated inference_info column
+     info_json = json.dumps(existing_info, ensure_ascii=False)
+     dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
+
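+     # Every row now carries the same JSON list in `inference_info`, one entry
+     # per OCR pass over this dataset, e.g. (illustrative values):
+     #     [{"column_name": "markdown", "model_id": "nanonets/Nanonets-OCR2-3B",
+     #       "batch_size": 16, "max_tokens": 4096, ...}]
+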
360
+ # Push to hub
361
+ logger.info(f"Pushing to {output_dataset}")
362
+ dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
363
+
364
+ # Calculate processing time
365
+ end_time = datetime.now()
366
+ processing_duration = end_time - start_time
367
+ processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
368
+
369
+ # Create and push dataset card
370
+ logger.info("Creating dataset card...")
371
+ card_content = create_dataset_card(
372
+ source_dataset=input_dataset,
373
+ model=model,
374
+ num_samples=len(dataset),
375
+ processing_time=processing_time,
376
+ batch_size=batch_size,
377
+ max_model_len=max_model_len,
378
+ max_tokens=max_tokens,
379
+ gpu_memory_utilization=gpu_memory_utilization,
380
+ image_column=image_column,
381
+ split=split,
382
+ )
383
+
384
+ card = DatasetCard(card_content)
385
+ card.push_to_hub(output_dataset, token=HF_TOKEN)
386
+ logger.info("✅ Dataset card created and pushed!")
387
+
388
+ logger.info("✅ OCR conversion complete!")
389
+ logger.info(
390
+ f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
391
+ )
392
+
393
+
394
+ if __name__ == "__main__":
395
+ # Show example usage if no arguments
396
+ if len(sys.argv) == 1:
397
+ print("=" * 80)
398
+ print("Nanonets OCR2 to Markdown Converter")
399
+ print("=" * 80)
400
+ print("\nThis script converts document images to structured markdown using")
401
+ print("Nanonets-OCR2 models (1.5B or 3B) with vLLM acceleration.")
402
+ print("\nModel Options:")
403
+ print("- Nanonets-OCR2-3B (default): 3.75B params, best quality")
404
+ print("- Nanonets-OCR2-1.5B-exp: 1.65B params, faster processing")
405
+ print("\nFeatures:")
406
+ print("- LaTeX equation recognition")
407
+ print("- Table extraction and formatting (HTML)")
408
+ print("- Document structure preservation")
409
+ print("- Image descriptions and captions")
410
+ print("- Signature and watermark detection")
411
+ print("- Checkbox recognition (☐/☑)")
412
+ print("- Multilingual support")
413
+ print("\nExample usage:")
414
+ print("\n1. Basic OCR conversion (3B model, best quality):")
415
+ print(" uv run nanonets-ocr2.py document-images markdown-docs")
416
+ print("\n2. Fast processing with 1.5B model:")
417
+ print(" uv run nanonets-ocr2.py documents output \\")
418
+ print(" --model nanonets/Nanonets-OCR2-1.5B-exp")
419
+ print("\n3. With custom settings:")
420
+ print(" uv run nanonets-ocr2.py scanned-pdfs extracted-text \\")
421
+ print(" --image-column page \\")
422
+ print(" --batch-size 32 \\")
423
+ print(" --gpu-memory-utilization 0.8")
424
+ print("\n4. Process a subset for testing:")
425
+ print(" uv run nanonets-ocr2.py large-dataset test-output --max-samples 10")
426
+ print("\n5. Random sample from ordered dataset:")
427
+ print(" uv run nanonets-ocr2.py ordered-dataset random-test \\")
428
+ print(" --max-samples 50 --shuffle")
429
+ print("\n6. Running on HF Jobs:")
430
+ print(" hf jobs uv run --flavor l4x1 \\")
431
+ print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
432
+ print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\")
433
+ print(" your-document-dataset \\")
434
+ print(" your-markdown-output")
435
+ print("\n" + "=" * 80)
436
+ print("\nFor full help, run: uv run nanonets-ocr2.py --help")
437
+ sys.exit(0)
438
+
439
+ parser = argparse.ArgumentParser(
440
+ description="OCR images to markdown using Nanonets-OCR2 models",
441
+ formatter_class=argparse.RawDescriptionHelpFormatter,
442
+ epilog="""
443
+ Models:
444
+ nanonets/Nanonets-OCR2-3B (default) - 3.75B params, best quality
445
+ nanonets/Nanonets-OCR2-1.5B-exp - 1.65B params, faster
446
+
447
+ Examples:
448
+ # Basic usage (3B model)
449
+ uv run nanonets-ocr2.py my-images-dataset ocr-results
450
+
451
+ # Fast processing with 1.5B model
452
+ uv run nanonets-ocr2.py documents output --model nanonets/Nanonets-OCR2-1.5B-exp
453
+
454
+ # With specific image column
455
+ uv run nanonets-ocr2.py documents extracted-text --image-column scan
456
+
457
+ # Process subset for testing
458
+ uv run nanonets-ocr2.py large-dataset test-output --max-samples 100
459
+
460
+ # Random sample from ordered dataset
461
+ uv run nanonets-ocr2.py ordered-dataset random-sample --max-samples 50 --shuffle
462
+ """,
463
+ )
464
+
465
+ parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
466
+ parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
467
+ parser.add_argument(
468
+ "--image-column",
469
+ default="image",
470
+ help="Column containing images (default: image)",
471
+ )
472
+ parser.add_argument(
473
+ "--batch-size",
474
+ type=int,
475
+ default=None,
476
+ help="Batch size for processing (default: auto - 16 for 3B, 32 for 1.5B)",
477
+ )
478
+ parser.add_argument(
479
+ "--model",
480
+ default="nanonets/Nanonets-OCR2-3B",
481
+ choices=["nanonets/Nanonets-OCR2-3B", "nanonets/Nanonets-OCR2-1.5B-exp"],
482
+ help="Model to use (default: Nanonets-OCR2-3B for best quality)",
483
+ )
484
+ parser.add_argument(
485
+ "--max-model-len",
486
+ type=int,
487
+ default=8192,
488
+ help="Maximum model context length (default: 8192)",
489
+ )
490
+ parser.add_argument(
491
+ "--max-tokens",
492
+ type=int,
493
+ default=4096,
494
+ help="Maximum tokens to generate (default: 4096)",
495
+ )
496
+ parser.add_argument(
497
+ "--gpu-memory-utilization",
498
+ type=float,
499
+ default=0.8,
500
+ help="GPU memory utilization (default: 0.8)",
501
+ )
502
+ parser.add_argument("--hf-token", help="Hugging Face API token")
503
+ parser.add_argument(
504
+ "--split", default="train", help="Dataset split to use (default: train)"
505
+ )
506
+ parser.add_argument(
507
+ "--max-samples",
508
+ type=int,
509
+ help="Maximum number of samples to process (for testing)",
510
+ )
511
+ parser.add_argument(
512
+ "--private", action="store_true", help="Make output dataset private"
513
+ )
514
+ parser.add_argument(
515
+ "--shuffle",
516
+ action="store_true",
517
+ help="Shuffle the dataset before processing (useful for random sampling)",
518
+ )
519
+ parser.add_argument(
520
+ "--seed",
521
+ type=int,
522
+ default=42,
523
+ help="Random seed for shuffling (default: 42)",
524
+ )
525
+
526
+ args = parser.parse_args()
527
+
528
+ main(
529
+ input_dataset=args.input_dataset,
530
+ output_dataset=args.output_dataset,
531
+ image_column=args.image_column,
532
+ batch_size=args.batch_size,
533
+ model=args.model,
534
+ max_model_len=args.max_model_len,
535
+ max_tokens=args.max_tokens,
536
+ gpu_memory_utilization=args.gpu_memory_utilization,
537
+ hf_token=args.hf_token,
538
+ split=args.split,
539
+ max_samples=args.max_samples,
540
+ private=args.private,
541
+ shuffle=args.shuffle,
542
+ seed=args.seed,
543
+ )