Commit d272f1c
Parent(s): 011bc8a

update

classify-dataset.py  CHANGED  (+356 -123)
@@ -3,7 +3,7 @@
 # requires-python = ">=3.10"
 # dependencies = [
 #     "vllm>=0.6.6",
-#     "transformers",
+#     "transformers>=4.53.0",
 #     "torch",
 #     "datasets",
 #     "huggingface-hub[hf_transfer]",
@@ -36,42 +36,95 @@ import argparse
 import logging
 import os
 import sys
 from typing import List
 
 import torch
 from datasets import load_dataset
 from huggingface_hub import HfApi, get_token
+from transformers import AutoTokenizer
 from vllm import LLM, SamplingParams
 from vllm.sampling_params import GuidedDecodingParams
 
 # Default model - SmolLM3 for good balance of speed and quality
 DEFAULT_MODEL = "HuggingFaceTB/SmolLM3-3B"
 
-
-Text: {text}
-
-Label:""",
-
-    "detailed": """Task: Classify the following text into EXACTLY ONE of these categories.
-Available categories: {labels}
-
-
-    "reasoning": """Analyze the following text and determine which category it belongs to.
-Available categories: {labels}
-
-Text
-{text}
-
-
+def parse_label_descriptions(desc_string: str) -> dict:
+    """Parse label descriptions from CLI format 'label1:desc1,label2:desc2'."""
+    if not desc_string:
+        return {}
+
+    descriptions = {}
+    # Split by comma, but be careful about commas in descriptions
+    parts = desc_string.split(',')
+
+    current_label = None
+    current_desc_parts = []
+
+    for part in parts:
+        if ':' in part and not current_label:
+            # New label:description pair
+            label, desc = part.split(':', 1)
+            current_label = label.strip()
+            current_desc_parts = [desc.strip()]
+        elif ':' in part and current_label:
+            # Save previous label and start new one
+            descriptions[current_label] = ','.join(current_desc_parts)
+            label, desc = part.split(':', 1)
+            current_label = label.strip()
+            current_desc_parts = [desc.strip()]
+        else:
+            # Continuation of previous description (had comma in it)
+            current_desc_parts.append(part.strip())
+
+    # Don't forget the last one
+    if current_label:
+        descriptions[current_label] = ','.join(current_desc_parts)
+
+    return descriptions
+
+
+def create_messages(text: str, labels: List[str], label_descriptions: dict = None, enable_reasoning: bool = False) -> List[dict]:
+    """Create messages for chat template with optional label descriptions."""
+
+    # Build the classification prompt
+    if label_descriptions:
+        # Format with descriptions
+        categories_text = "Categories:\n"
+        for label in labels:
+            desc = label_descriptions.get(label, "")
+            if desc:
+                categories_text += f"- {label}: {desc}\n"
+            else:
+                categories_text += f"- {label}\n"
+    else:
+        # Simple format without descriptions
+        categories_text = f"Categories: {', '.join(labels)}"
+
+    if enable_reasoning:
+        # Reasoning mode: allow thinking and request JSON output
+        user_content = f"""Classify this text into one of these categories:
+
+{categories_text}
+
+Text: {text}
+
+Think through your classification step by step, then provide your final answer in this JSON format:
+{{"label": "your_chosen_label"}}"""
+
+        system_content = "You are a helpful classification assistant that thinks step by step."
+    else:
+        # Structured output mode: fast classification
+        if label_descriptions:
+            user_content = f"Classify this text into one of these categories:\n\n{categories_text}\nText: {text}\n\nCategory:"
+        else:
+            user_content = f"Classify this text as one of: {', '.join(labels)}\n\nText: {text}\n\nLabel:"
+
+        system_content = "You are a helpful classification assistant. /no_think"
+
+    return [
+        {"role": "system", "content": system_content},
+        {"role": "user", "content": user_content}
+    ]
 
 # Minimum text length for valid classification
 MIN_TEXT_LENGTH = 3
@@ -79,7 +132,79 @@ MIN_TEXT_LENGTH = 3
 # Maximum text length (in characters) to avoid context overflow
 MAX_TEXT_LENGTH = 4000
 
+
+def parse_reasoning_output(output: str, valid_labels: List[str]) -> tuple[str, str, bool]:
+    """Parse reasoning output to extract label from JSON after </think> tag.
+
+    Returns:
+        tuple: (label or None, full reasoning text, parsing_success)
+    """
+    import json
+
+    # Find the </think> tag
+    think_end = output.find("</think>")
+
+    if think_end != -1:
+        # Extract everything after </think>
+        json_part = output[think_end + len("</think>"):].strip()
+        reasoning = output[:think_end + len("</think>")]
+    else:
+        # No think tags, look for JSON in the output
+        # Try to find JSON by looking for {
+        json_start = output.find("{")
+        if json_start != -1:
+            json_part = output[json_start:].strip()
+            reasoning = output[:json_start].strip() if json_start > 0 else ""
+        else:
+            json_part = output
+            reasoning = output
+
+    # Try to parse JSON
+    try:
+        # Find the first complete JSON object
+        if "{" in json_part:
+            # Extract just the JSON object
+            json_str = json_part[json_part.find("{"):]
+            # Find the matching closing brace
+            brace_count = 0
+            end_pos = 0
+            for i, char in enumerate(json_str):
+                if char == "{":
+                    brace_count += 1
+                elif char == "}":
+                    brace_count -= 1
+                    if brace_count == 0:
+                        end_pos = i + 1
+                        break
+
+            if end_pos > 0:
+                json_str = json_str[:end_pos]
+                data = json.loads(json_str)
+                label = data.get("label", "")
+
+                # Validate label
+                if label in valid_labels:
+                    return label, output, True
+                else:
+                    logger.warning(f"Parsed label '{label}' not in valid labels: {valid_labels}")
+                    return None, output, False
+            else:
+                logger.warning("Could not find complete JSON object")
+                return None, output, False
+        else:
+            logger.warning("No JSON found in output")
+            return None, output, False
+
+    except json.JSONDecodeError as e:
+        logger.warning(f"JSON parsing error: {e}")
+        return None, output, False
+    except Exception as e:
+        logger.warning(f"Unexpected error parsing output: {e}")
+        return None, output, False
+
+
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
 logger = logging.getLogger(__name__)
 
 
@@ -87,87 +212,99 @@ def parse_args():
     parser = argparse.ArgumentParser(
         description="Classify text in HuggingFace datasets using vLLM with structured outputs",
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog=__doc__
+        epilog=__doc__,
     )
 
     # Required arguments
     parser.add_argument(
         "--input-dataset",
         type=str,
         required=True,
-        help="Input dataset ID on Hugging Face Hub"
+        help="Input dataset ID on Hugging Face Hub",
     )
     parser.add_argument(
-        "--column",
-        type=str,
-        required=True,
-        help="Name of the text column to classify"
+        "--column", type=str, required=True, help="Name of the text column to classify"
     )
     parser.add_argument(
         "--labels",
         type=str,
         required=True,
-        help="Comma-separated list of classification labels (e.g., 'positive,negative')"
+        help="Comma-separated list of classification labels (e.g., 'positive,negative')",
     )
     parser.add_argument(
         "--output-dataset",
         type=str,
         required=True,
-        help="Output dataset ID on Hugging Face Hub"
+        help="Output dataset ID on Hugging Face Hub",
    )
 
     # Optional arguments
     parser.add_argument(
         "--model",
         type=str,
         default=DEFAULT_MODEL,
-        help=f"Model to use for classification (default: {DEFAULT_MODEL})"
+        help=f"Model to use for classification (default: {DEFAULT_MODEL})",
     )
     # Removed --batch-size argument as vLLM handles batching internally
     parser.add_argument(
-        "--
+        "--label-descriptions",
         type=str,
-
-
+        default=None,
+        help="Descriptions for each label in format 'label1:description1,label2:description2'",
+    )
+    parser.add_argument(
+        "--enable-reasoning",
+        action="store_true",
+        help="Enable reasoning mode with thinking traces (disables structured outputs)",
     )
     parser.add_argument(
         "--max-samples",
         type=int,
         default=None,
-        help="Maximum number of samples to process (for testing)"
+        help="Maximum number of samples to process (for testing)",
     )
     parser.add_argument(
         "--hf-token",
         type=str,
         default=None,
-        help="Hugging Face API token (default: auto-detect from HF_TOKEN env var or huggingface-cli login)"
+        help="Hugging Face API token (default: auto-detect from HF_TOKEN env var or huggingface-cli login)",
     )
     parser.add_argument(
         "--split",
         type=str,
         default="train",
-        help="Dataset split to process (default: train)"
+        help="Dataset split to process (default: train)",
     )
     parser.add_argument(
         "--temperature",
         type=float,
         default=0.1,
-        help="Temperature for generation (default: 0.1)"
+        help="Temperature for generation (default: 0.1)",
     )
     parser.add_argument(
         "--max-tokens",
         type=int,
-        default=
-        help="Maximum tokens to generate (default: 
+        default=100,
+        help="Maximum tokens to generate (default: 100, automatically increased 20x for reasoning mode)",
     )
     parser.add_argument(
         "--guided-backend",
         type=str,
         default="outlines",
-        help="Guided decoding backend (default: outlines)"
+        help="Guided decoding backend (default: outlines)",
     )
-
+    parser.add_argument(
+        "--shuffle",
+        action="store_true",
+        help="Shuffle dataset before selecting samples (useful with --max-samples for random sampling)",
+    )
+    parser.add_argument(
+        "--shuffle-seed",
+        type=int,
+        default=42,
+        help="Random seed for shuffling (default: 42)",
+    )
+
     return parser.parse_args()
 
 
@@ -175,63 +312,62 @@ def preprocess_text(text: str) -> str:
     """Preprocess text for classification."""
     if not text or not isinstance(text, str):
         return ""
 
     # Strip whitespace
     text = text.strip()
 
     # Truncate if too long
     if len(text) > MAX_TEXT_LENGTH:
-        text = text[:MAX_TEXT_LENGTH]
+        text = f"{text[:MAX_TEXT_LENGTH]}..."
 
     return text
 
 
 def validate_text(text: str) -> bool:
     """Check if text is valid for classification."""
-    if not text or len(text) < MIN_TEXT_LENGTH:
-        return False
-    return True
+    return bool(text and len(text) >= MIN_TEXT_LENGTH)
 
 
 def prepare_prompts(
-    texts: List[str],
-    labels: List[str],
-    prompt_template: str
+    texts: List[str], labels: List[str], tokenizer: AutoTokenizer,
+    label_descriptions: dict = None, enable_reasoning: bool = False
 ) -> tuple[List[str], List[int]]:
-    """Prepare prompts for classification, filtering invalid texts."""
+    """Prepare prompts using chat template for classification, filtering invalid texts."""
     prompts = []
     valid_indices = []
 
     for i, text in enumerate(texts):
         processed_text = preprocess_text(text)
         if validate_text(processed_text):
-            prompt = prompt_template.format(
-
-
+            # Create messages for chat template
+            messages = create_messages(processed_text, labels, label_descriptions, enable_reasoning)
+
+            # Apply chat template
+            prompt = tokenizer.apply_chat_template(
+                messages,
+                tokenize=False,
+                add_generation_prompt=True
             )
             prompts.append(prompt)
             valid_indices.append(i)
 
     return prompts, valid_indices
 
 
 def main():
     args = parse_args()
 
     # Check authentication early
     logger.info("Checking authentication...")
-    token = args.hf_token
-    if not token:
-        # Try to get token from environment or huggingface-cli login
-        token = os.environ.get("HF_TOKEN") or get_token()
-
+    token = args.hf_token or (os.environ.get("HF_TOKEN") or get_token())
+
     if not token:
         logger.error("No authentication token found. Please either:")
         logger.error("1. Run 'huggingface-cli login'")
         logger.error("2. Set HF_TOKEN environment variable")
         logger.error("3. Pass --hf-token argument")
         sys.exit(1)
 
     # Validate token by checking who we are
     try:
         api = HfApi(token=token)
@@ -241,15 +377,15 @@ def main():
         logger.error(f"Authentication failed: {e}")
         logger.error("Please check your token is valid")
         sys.exit(1)
 
     # Check CUDA availability
     if not torch.cuda.is_available():
         logger.error("CUDA is not available. This script requires a GPU.")
         logger.error("Please run on a machine with GPU support or use HF Jobs.")
         sys.exit(1)
 
     logger.info(f"CUDA available. Using device: {torch.cuda.get_device_name(0)}")
 
     # Parse and validate labels
     labels = [label.strip() for label in args.labels.split(",")]
     if len(labels) < 2:
@@ -257,30 +393,52 @@ def main():
         sys.exit(1)
     logger.info(f"Classification labels: {labels}")
 
+    # Parse label descriptions if provided
+    label_descriptions = None
+    if args.label_descriptions:
+        label_descriptions = parse_label_descriptions(args.label_descriptions)
+        logger.info("Label descriptions provided:")
+        for label, desc in label_descriptions.items():
+            logger.info(f"  {label}: {desc}")
+
     # Load dataset
     logger.info(f"Loading dataset: {args.input_dataset}")
     try:
         dataset = load_dataset(args.input_dataset, split=args.split)
-
+        logger.info(f"Loaded {len(dataset)} samples from split '{args.split}'")
+
+        # Shuffle if requested
+        if args.shuffle:
+            logger.info(f"Shuffling dataset with seed {args.shuffle_seed}")
+            dataset = dataset.shuffle(seed=args.shuffle_seed)
+
         # Limit samples if specified
         if args.max_samples:
             dataset = dataset.select(range(min(args.max_samples, len(dataset))))
             logger.info(f"Limited dataset to {len(dataset)} samples")
-
-
+            if args.shuffle:
+                logger.info("Note: Samples were randomly selected due to shuffling")
     except Exception as e:
         logger.error(f"Failed to load dataset: {e}")
         sys.exit(1)
 
     # Verify column exists
     if args.column not in dataset.column_names:
         logger.error(f"Column '{args.column}' not found in dataset.")
         logger.error(f"Available columns: {dataset.column_names}")
         sys.exit(1)
 
     # Extract texts
     texts = dataset[args.column]
 
+    # Load tokenizer for chat template formatting
+    logger.info(f"Loading tokenizer for {args.model}")
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
+    except Exception as e:
+        logger.error(f"Failed to load tokenizer: {e}")
+        sys.exit(1)
+
     # Initialize vLLM
     logger.info(f"Initializing vLLM with model: {args.model}")
     logger.info(f"Using guided decoding backend: {args.guided_backend}")
@@ -295,85 +453,139 @@ def main():
     except Exception as e:
         logger.error(f"Failed to initialize vLLM: {e}")
         sys.exit(1)
 
-    # Set up 
-
-
+    # Set up sampling parameters based on mode
+    if args.enable_reasoning:
+        # Reasoning mode: no guided decoding, much more tokens for thinking
+        sampling_params = SamplingParams(
+            temperature=args.temperature,
+            max_tokens=args.max_tokens * 20,  # 20x more tokens for extensive reasoning
+        )
+        logger.info("Using reasoning mode - model will generate thinking traces with JSON output")
+    else:
+        # Structured output mode: guided decoding
+        guided_params = GuidedDecodingParams(choice=labels)
+        sampling_params = SamplingParams(
+            guided_decoding=guided_params,
+            temperature=args.temperature,
+            max_tokens=args.max_tokens,
+        )
+        logger.info("Using structured output with guided_choice - outputs guaranteed to be valid labels")
 
     # Prepare all prompts
     logger.info("Preparing prompts for classification...")
-    all_prompts, valid_indices = prepare_prompts(texts, labels, 
-
     if not all_prompts:
         logger.error("No valid texts found for classification.")
         sys.exit(1)
 
     logger.info(f"Prepared {len(all_prompts)} valid prompts out of {len(texts)} texts")
 
     # Let vLLM handle batching internally
     logger.info("Starting classification (vLLM will handle batching internally)...")
 
     try:
         # Generate all classifications at once - vLLM handles batching
         outputs = llm.generate(all_prompts, sampling_params)
 
-        # 
-
-
         # Count statistics
-        valid_texts = len(valid_indices)
         total_texts = len(texts)
 
     except Exception as e:
         logger.error(f"Classification failed: {e}")
         sys.exit(1)
 
-    # Add 
     dataset = dataset.add_column("classification", all_classifications)
 
     # Calculate statistics
     none_count = total_texts - valid_texts
     if none_count > 0:
-        logger.warning(
-
     # Show classification distribution
     label_counts = {label: all_classifications.count(label) for label in labels}
     logger.info("Classification distribution:")
     for label, count in label_counts.items():
         percentage = count / total_texts * 100 if total_texts > 0 else 0
         logger.info(f"  {label}: {count} ({percentage:.1f}%)")
-    if none_count > 0:
-        none_percentage = none_count / total_texts * 100
-        logger.info(f"  Invalid/Skipped: {none_count} ({none_percentage:.1f}%)")
 
     # Log success rate
     success_rate = (valid_texts / total_texts * 100) if total_texts > 0 else 0
     logger.info(f"Classification success rate: {success_rate:.1f}%")
 
     # Save to Hub (token already validated at start)
     logger.info(f"Pushing dataset to Hub: {args.output_dataset}")
     try:
         dataset.push_to_hub(
             args.output_dataset,
             token=token,
-            commit_message=f"Add classifications using {args.model} with structured outputs"
         )
-        logger.info(f"Successfully pushed to: https://huggingface.co/datasets/{args.output_dataset}")
     except Exception as e:
         logger.error(f"Failed to push to Hub: {e}")
         sys.exit(1)
@@ -381,7 +593,28 @@ def main():
 
 if __name__ == "__main__":
     if len(sys.argv) == 1:
-        print("Example 
         print("hf jobs uv run \\")
         print("  --flavor l4x1 \\")
         print("  --image vllm/vllm-openai:latest \\")
@@ -391,5 +624,5 @@ if __name__ == "__main__":
         print("  --labels 'positive,negative' \\")
         print("  --output-dataset user/imdb-classified")
         sys.exit(0)
-
-    main()
| 3 | 
             
            # requires-python = ">=3.10"
         | 
| 4 | 
             
            # dependencies = [
         | 
| 5 | 
             
            #     "vllm>=0.6.6",
         | 
| 6 | 
            +
            #     "transformers>=4.53.0",
         | 
| 7 | 
             
            #     "torch",
         | 
| 8 | 
             
            #     "datasets",
         | 
| 9 | 
             
            #     "huggingface-hub[hf_transfer]",
         | 
|  | |
| 36 | 
             
            import logging
         | 
| 37 | 
             
            import os
         | 
| 38 | 
             
            import sys
         | 
| 39 | 
            +
            from typing import List
         | 
| 40 |  | 
| 41 | 
             
            import torch
         | 
| 42 | 
            +
            from datasets import load_dataset
         | 
| 43 | 
             
            from huggingface_hub import HfApi, get_token
         | 
| 44 | 
            +
            from transformers import AutoTokenizer
         | 
| 45 | 
             
            from vllm import LLM, SamplingParams
         | 
| 46 | 
             
            from vllm.sampling_params import GuidedDecodingParams
         | 
| 47 |  | 
| 48 | 
             
            # Default model - SmolLM3 for good balance of speed and quality
         | 
| 49 | 
             
            DEFAULT_MODEL = "HuggingFaceTB/SmolLM3-3B"
         | 
| 50 |  | 
| 51 | 
            +
            def parse_label_descriptions(desc_string: str) -> dict:
         | 
| 52 | 
            +
                """Parse label descriptions from CLI format 'label1:desc1,label2:desc2'."""
         | 
| 53 | 
            +
                if not desc_string:
         | 
| 54 | 
            +
                    return {}
         | 
| 55 | 
            +
                
         | 
| 56 | 
            +
                descriptions = {}
         | 
| 57 | 
            +
                # Split by comma, but be careful about commas in descriptions
         | 
| 58 | 
            +
                parts = desc_string.split(',')
         | 
| 59 | 
            +
                
         | 
| 60 | 
            +
                current_label = None
         | 
| 61 | 
            +
                current_desc_parts = []
         | 
| 62 | 
            +
                
         | 
| 63 | 
            +
                for part in parts:
         | 
| 64 | 
            +
                    if ':' in part and not current_label:
         | 
| 65 | 
            +
                        # New label:description pair
         | 
| 66 | 
            +
                        label, desc = part.split(':', 1)
         | 
| 67 | 
            +
                        current_label = label.strip()
         | 
| 68 | 
            +
                        current_desc_parts = [desc.strip()]
         | 
| 69 | 
            +
                    elif ':' in part and current_label:
         | 
| 70 | 
            +
                        # Save previous label and start new one
         | 
| 71 | 
            +
                        descriptions[current_label] = ','.join(current_desc_parts)
         | 
| 72 | 
            +
                        label, desc = part.split(':', 1)
         | 
| 73 | 
            +
                        current_label = label.strip()
         | 
| 74 | 
            +
                        current_desc_parts = [desc.strip()]
         | 
| 75 | 
            +
                    else:
         | 
| 76 | 
            +
                        # Continuation of previous description (had comma in it)
         | 
| 77 | 
            +
                        current_desc_parts.append(part.strip())
         | 
| 78 | 
            +
                
         | 
| 79 | 
            +
                # Don't forget the last one
         | 
| 80 | 
            +
                if current_label:
         | 
| 81 | 
            +
                    descriptions[current_label] = ','.join(current_desc_parts)
         | 
| 82 | 
            +
                
         | 
| 83 | 
            +
                return descriptions
         | 
| 84 |  | 
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
| 85 |  | 
| 86 | 
            +
            def create_messages(text: str, labels: List[str], label_descriptions: dict = None, enable_reasoning: bool = False) -> List[dict]:
         | 
| 87 | 
            +
                """Create messages for chat template with optional label descriptions."""
         | 
| 88 | 
            +
                
         | 
| 89 | 
            +
                # Build the classification prompt
         | 
| 90 | 
            +
                if label_descriptions:
         | 
| 91 | 
            +
                    # Format with descriptions
         | 
| 92 | 
            +
                    categories_text = "Categories:\n"
         | 
| 93 | 
            +
                    for label in labels:
         | 
| 94 | 
            +
                        desc = label_descriptions.get(label, "")
         | 
| 95 | 
            +
                        if desc:
         | 
| 96 | 
            +
                            categories_text += f"- {label}: {desc}\n"
         | 
| 97 | 
            +
                        else:
         | 
| 98 | 
            +
                            categories_text += f"- {label}\n"
         | 
| 99 | 
            +
                else:
         | 
| 100 | 
            +
                    # Simple format without descriptions
         | 
| 101 | 
            +
                    categories_text = f"Categories: {', '.join(labels)}"
         | 
| 102 | 
            +
                
         | 
| 103 | 
            +
                if enable_reasoning:
         | 
| 104 | 
            +
                    # Reasoning mode: allow thinking and request JSON output
         | 
| 105 | 
            +
                    user_content = f"""Classify this text into one of these categories:
         | 
| 106 |  | 
| 107 | 
            +
            {categories_text}
         | 
|  | |
|  | |
|  | |
| 108 |  | 
| 109 | 
            +
            Text: {text}
         | 
|  | |
| 110 |  | 
| 111 | 
            +
            Think through your classification step by step, then provide your final answer in this JSON format:
         | 
| 112 | 
            +
            {{"label": "your_chosen_label"}}"""
         | 
| 113 | 
            +
                    
         | 
| 114 | 
            +
                    system_content = "You are a helpful classification assistant that thinks step by step."
         | 
| 115 | 
            +
                else:
         | 
| 116 | 
            +
                    # Structured output mode: fast classification
         | 
| 117 | 
            +
                    if label_descriptions:
         | 
| 118 | 
            +
                        user_content = f"Classify this text into one of these categories:\n\n{categories_text}\nText: {text}\n\nCategory:"
         | 
| 119 | 
            +
                    else:
         | 
| 120 | 
            +
                        user_content = f"Classify this text as one of: {', '.join(labels)}\n\nText: {text}\n\nLabel:"
         | 
| 121 | 
            +
                    
         | 
| 122 | 
            +
                    system_content = "You are a helpful classification assistant. /no_think"
         | 
| 123 | 
            +
                
         | 
| 124 | 
            +
                return [
         | 
| 125 | 
            +
                    {"role": "system", "content": system_content},
         | 
| 126 | 
            +
                    {"role": "user", "content": user_content}
         | 
| 127 | 
            +
                ]
         | 
| 128 |  | 
| 129 | 
             
            # Minimum text length for valid classification
         | 
| 130 | 
             
            MIN_TEXT_LENGTH = 3
         | 
|  | |
| 132 | 
             
            # Maximum text length (in characters) to avoid context overflow
         | 
| 133 | 
             
            MAX_TEXT_LENGTH = 4000
         | 
| 134 |  | 
| 135 | 
            +
             | 
| 136 | 
            +
            def parse_reasoning_output(output: str, valid_labels: List[str]) -> tuple[str, str, bool]:
         | 
| 137 | 
            +
                """Parse reasoning output to extract label from JSON after </think> tag.
         | 
| 138 | 
            +
                
         | 
| 139 | 
            +
                Returns:
         | 
| 140 | 
            +
                    tuple: (label or None, full reasoning text, parsing_success)
         | 
| 141 | 
            +
                """
         | 
| 142 | 
            +
                import json
         | 
| 143 | 
            +
                
         | 
| 144 | 
            +
                # Find the </think> tag
         | 
| 145 | 
            +
                think_end = output.find("</think>")
         | 
| 146 | 
            +
                
         | 
| 147 | 
            +
                if think_end != -1:
         | 
| 148 | 
            +
                    # Extract everything after </think>
         | 
| 149 | 
            +
                    json_part = output[think_end + len("</think>"):].strip()
         | 
| 150 | 
            +
                    reasoning = output[:think_end + len("</think>")]
         | 
| 151 | 
            +
                else:
         | 
| 152 | 
            +
                    # No think tags, look for JSON in the output
         | 
| 153 | 
            +
                    # Try to find JSON by looking for {
         | 
| 154 | 
            +
                    json_start = output.find("{")
         | 
| 155 | 
            +
                    if json_start != -1:
         | 
| 156 | 
            +
                        json_part = output[json_start:].strip()
         | 
| 157 | 
            +
                        reasoning = output[:json_start].strip() if json_start > 0 else ""
         | 
| 158 | 
            +
                    else:
         | 
| 159 | 
            +
                        json_part = output
         | 
| 160 | 
            +
                        reasoning = output
         | 
| 161 | 
            +
                
         | 
| 162 | 
            +
                # Try to parse JSON
         | 
| 163 | 
            +
                try:
         | 
| 164 | 
            +
                    # Find the first complete JSON object
         | 
| 165 | 
            +
                    if "{" in json_part:
         | 
| 166 | 
            +
                        # Extract just the JSON object
         | 
| 167 | 
            +
                        json_str = json_part[json_part.find("{"):]
         | 
| 168 | 
            +
                        # Find the matching closing brace
         | 
| 169 | 
            +
                        brace_count = 0
         | 
| 170 | 
            +
                        end_pos = 0
         | 
| 171 | 
            +
                        for i, char in enumerate(json_str):
         | 
| 172 | 
            +
                            if char == "{":
         | 
| 173 | 
            +
                                brace_count += 1
         | 
| 174 | 
            +
                            elif char == "}":
         | 
| 175 | 
            +
                                brace_count -= 1
         | 
| 176 | 
            +
                                if brace_count == 0:
         | 
| 177 | 
            +
                                    end_pos = i + 1
         | 
| 178 | 
            +
                                    break
         | 
| 179 | 
            +
                        
         | 
| 180 | 
            +
                        if end_pos > 0:
         | 
| 181 | 
            +
                            json_str = json_str[:end_pos]
         | 
| 182 | 
            +
                            data = json.loads(json_str)
         | 
| 183 | 
            +
                            label = data.get("label", "")
         | 
| 184 | 
            +
                            
         | 
| 185 | 
            +
                            # Validate label
         | 
| 186 | 
            +
                            if label in valid_labels:
         | 
| 187 | 
            +
                                return label, output, True
         | 
| 188 | 
            +
                            else:
         | 
| 189 | 
            +
                                logger.warning(f"Parsed label '{label}' not in valid labels: {valid_labels}")
         | 
| 190 | 
            +
                                return None, output, False
         | 
| 191 | 
            +
                        else:
         | 
| 192 | 
            +
                            logger.warning("Could not find complete JSON object")
         | 
| 193 | 
            +
                            return None, output, False
         | 
| 194 | 
            +
                    else:
         | 
| 195 | 
            +
                        logger.warning("No JSON found in output")
         | 
| 196 | 
            +
                        return None, output, False
         | 
| 197 | 
            +
                        
         | 
| 198 | 
            +
                except json.JSONDecodeError as e:
         | 
| 199 | 
            +
                    logger.warning(f"JSON parsing error: {e}")
         | 
| 200 | 
            +
                    return None, output, False
         | 
| 201 | 
            +
                except Exception as e:
         | 
| 202 | 
            +
                    logger.warning(f"Unexpected error parsing output: {e}")
         | 
| 203 | 
            +
                    return None, output, False
         | 
| 204 | 
            +
             | 
| 205 | 
            +
            logging.basicConfig(
         | 
| 206 | 
            +
                level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
         | 
| 207 | 
            +
            )
         | 
| 208 | 
             
            logger = logging.getLogger(__name__)
         | 
| 209 |  | 
| 210 |  | 
|  | |
| 212 | 
             
                parser = argparse.ArgumentParser(
         | 
| 213 | 
             
                    description="Classify text in HuggingFace datasets using vLLM with structured outputs",
         | 
| 214 | 
             
                    formatter_class=argparse.RawDescriptionHelpFormatter,
         | 
| 215 | 
            +
                    epilog=__doc__,
         | 
| 216 | 
             
                )
         | 
| 217 | 
            +
             | 
| 218 | 
             
                # Required arguments
         | 
| 219 | 
             
                parser.add_argument(
         | 
| 220 | 
             
                    "--input-dataset",
         | 
| 221 | 
             
                    type=str,
         | 
| 222 | 
             
                    required=True,
         | 
| 223 | 
            +
                    help="Input dataset ID on Hugging Face Hub",
         | 
| 224 | 
             
                )
         | 
| 225 | 
             
                parser.add_argument(
         | 
| 226 | 
            +
                    "--column", type=str, required=True, help="Name of the text column to classify"
         | 
|  | |
|  | |
|  | |
| 227 | 
             
                )
         | 
| 228 | 
             
                parser.add_argument(
         | 
| 229 | 
             
                    "--labels",
         | 
| 230 | 
             
                    type=str,
         | 
| 231 | 
             
                    required=True,
         | 
| 232 | 
            +
                    help="Comma-separated list of classification labels (e.g., 'positive,negative')",
         | 
| 233 | 
             
                )
         | 
| 234 | 
             
                parser.add_argument(
         | 
| 235 | 
             
                    "--output-dataset",
         | 
| 236 | 
             
                    type=str,
         | 
| 237 | 
             
                    required=True,
         | 
| 238 | 
            +
                    help="Output dataset ID on Hugging Face Hub",
         | 
| 239 | 
             
                )
         | 
| 240 | 
            +
             | 
| 241 | 
             
                # Optional arguments
         | 
| 242 | 
             
                parser.add_argument(
         | 
| 243 | 
             
                    "--model",
         | 
| 244 | 
             
                    type=str,
         | 
| 245 | 
             
                    default=DEFAULT_MODEL,
         | 
| 246 | 
            +
                    help=f"Model to use for classification (default: {DEFAULT_MODEL})",
         | 
| 247 | 
             
                )
         | 
| 248 | 
             
                # Removed --batch-size argument as vLLM handles batching internally
         | 
| 249 | 
             
                parser.add_argument(
         | 
| 250 | 
            +
                    "--label-descriptions",
         | 
| 251 | 
             
                    type=str,
         | 
| 252 | 
            +
                    default=None,
         | 
| 253 | 
            +
                    help="Descriptions for each label in format 'label1:description1,label2:description2'",
         | 
| 254 | 
            +
                )
         | 
| 255 | 
            +
                parser.add_argument(
         | 
| 256 | 
            +
                    "--enable-reasoning",
         | 
| 257 | 
            +
                    action="store_true",
         | 
| 258 | 
            +
                    help="Enable reasoning mode with thinking traces (disables structured outputs)",
         | 
| 259 | 
             
                )
         | 
| 260 | 
             
                parser.add_argument(
         | 
| 261 | 
             
                    "--max-samples",
         | 
| 262 | 
             
                    type=int,
         | 
| 263 | 
             
                    default=None,
         | 
| 264 | 
            +
                    help="Maximum number of samples to process (for testing)",
         | 
| 265 | 
             
                )
         | 
| 266 | 
             
                parser.add_argument(
         | 
| 267 | 
             
                    "--hf-token",
         | 
| 268 | 
             
                    type=str,
         | 
| 269 | 
             
                    default=None,
         | 
| 270 | 
            +
                    help="Hugging Face API token (default: auto-detect from HF_TOKEN env var or huggingface-cli login)",
         | 
| 271 | 
             
                )
         | 
| 272 | 
             
                parser.add_argument(
         | 
| 273 | 
             
                    "--split",
         | 
| 274 | 
             
                    type=str,
         | 
| 275 | 
             
                    default="train",
         | 
| 276 | 
            +
                    help="Dataset split to process (default: train)",
         | 
| 277 | 
             
                )
         | 
| 278 | 
             
                parser.add_argument(
         | 
| 279 | 
             
                    "--temperature",
         | 
| 280 | 
             
                    type=float,
         | 
| 281 | 
             
                    default=0.1,
         | 
| 282 | 
            +
                    help="Temperature for generation (default: 0.1)",
         | 
| 283 | 
             
                )
         | 
| 284 | 
             
                parser.add_argument(
         | 
| 285 | 
             
                    "--max-tokens",
         | 
| 286 | 
             
                    type=int,
         | 
| 287 | 
            +
                    default=100,
         | 
| 288 | 
            +
                    help="Maximum tokens to generate (default: 100, automatically increased 20x for reasoning mode)",
         | 
| 289 | 
             
                )
         | 
| 290 | 
             
                parser.add_argument(
         | 
| 291 | 
             
                    "--guided-backend",
         | 
| 292 | 
             
                    type=str,
         | 
| 293 | 
             
                    default="outlines",
         | 
| 294 | 
            +
                    help="Guided decoding backend (default: outlines)",
         | 
| 295 | 
             
                )
         | 
| 296 | 
            +
                parser.add_argument(
         | 
| 297 | 
            +
                    "--shuffle",
         | 
| 298 | 
            +
                    action="store_true",
         | 
| 299 | 
            +
                    help="Shuffle dataset before selecting samples (useful with --max-samples for random sampling)",
         | 
| 300 | 
            +
                )
         | 
| 301 | 
            +
                parser.add_argument(
         | 
| 302 | 
            +
                    "--shuffle-seed",
         | 
| 303 | 
            +
                    type=int,
         | 
| 304 | 
            +
                    default=42,
         | 
| 305 | 
            +
                    help="Random seed for shuffling (default: 42)",
         | 
| 306 | 
            +
                )
         | 
| 307 | 
            +
             | 
| 308 | 
             
                return parser.parse_args()
         | 
| 309 |  | 
| 310 |  | 
|  | |
| 312 | 
             
                """Preprocess text for classification."""
         | 
| 313 | 
             
                if not text or not isinstance(text, str):
         | 
| 314 | 
             
                    return ""
         | 
| 315 | 
            +
             | 
| 316 | 
             
                # Strip whitespace
         | 
| 317 | 
             
                text = text.strip()
         | 
| 318 | 
            +
             | 
| 319 | 
             
                # Truncate if too long
         | 
| 320 | 
             
                if len(text) > MAX_TEXT_LENGTH:
         | 
| 321 | 
            +
                    text = f"{text[:MAX_TEXT_LENGTH]}..."
         | 
| 322 | 
            +
             | 
| 323 | 
             
                return text
         | 
| 324 |  | 
| 325 |  | 
| 326 | 
             
            def validate_text(text: str) -> bool:
         | 
| 327 | 
             
                """Check if text is valid for classification."""
         | 
| 328 | 
            +
                return bool(text and len(text) >= MIN_TEXT_LENGTH)
         | 
|  | |
|  | |
| 329 |  | 
| 330 |  | 
| 331 | 
             
            def prepare_prompts(
         | 
| 332 | 
            +
                texts: List[str], labels: List[str], tokenizer: AutoTokenizer, 
         | 
| 333 | 
            +
                label_descriptions: dict = None, enable_reasoning: bool = False
         | 
|  | |
| 334 | 
             
            ) -> tuple[List[str], List[int]]:
         | 
| 335 | 
            +
                """Prepare prompts using chat template for classification, filtering invalid texts."""
         | 
| 336 | 
             
                prompts = []
         | 
| 337 | 
             
                valid_indices = []
         | 
| 338 | 
            +
             | 
| 339 | 
             
                for i, text in enumerate(texts):
         | 
| 340 | 
             
                    processed_text = preprocess_text(text)
         | 
| 341 | 
             
                    if validate_text(processed_text):
         | 
| 342 | 
            +
                        # Create messages for chat template
         | 
| 343 | 
            +
                        messages = create_messages(processed_text, labels, label_descriptions, enable_reasoning)
         | 
| 344 | 
            +
                        
         | 
| 345 | 
            +
                        # Apply chat template
         | 
| 346 | 
            +
                        prompt = tokenizer.apply_chat_template(
         | 
| 347 | 
            +
                            messages,
         | 
| 348 | 
            +
                            tokenize=False,
         | 
| 349 | 
            +
                            add_generation_prompt=True
         | 
| 350 | 
             
                        )
         | 
| 351 | 
             
                        prompts.append(prompt)
         | 
| 352 | 
             
                        valid_indices.append(i)
         | 
| 353 | 
            +
             | 
| 354 | 
             
                return prompts, valid_indices
         | 
| 355 |  | 
| 356 |  | 
| 357 | 
             
            def main():
         | 
| 358 | 
             
                args = parse_args()
         | 
| 359 | 
            +
             | 
| 360 | 
             
                # Check authentication early
         | 
| 361 | 
             
                logger.info("Checking authentication...")
         | 
| 362 | 
            +
                token = args.hf_token or (os.environ.get("HF_TOKEN") or get_token())
         | 
| 363 | 
            +
             | 
|  | |
|  | |
|  | |
| 364 | 
             
                if not token:
         | 
| 365 | 
             
                    logger.error("No authentication token found. Please either:")
         | 
| 366 | 
             
                    logger.error("1. Run 'huggingface-cli login'")
         | 
| 367 | 
             
                    logger.error("2. Set HF_TOKEN environment variable")
         | 
| 368 | 
             
                    logger.error("3. Pass --hf-token argument")
         | 
| 369 | 
             
                    sys.exit(1)
         | 
| 370 | 
            +
             | 
| 371 | 
             
                # Validate token by checking who we are
         | 
| 372 | 
             
                try:
         | 
| 373 | 
             
                    api = HfApi(token=token)
         | 
|  | |
| 377 | 
             
                    logger.error(f"Authentication failed: {e}")
         | 
| 378 | 
             
                    logger.error("Please check your token is valid")
         | 
| 379 | 
             
                    sys.exit(1)
         | 
| 380 | 
            +
             | 
| 381 | 
             
                # Check CUDA availability
         | 
| 382 | 
             
                if not torch.cuda.is_available():
         | 
| 383 | 
             
                    logger.error("CUDA is not available. This script requires a GPU.")
         | 
| 384 | 
             
                    logger.error("Please run on a machine with GPU support or use HF Jobs.")
         | 
| 385 | 
             
                    sys.exit(1)
         | 
| 386 | 
            +
             | 
| 387 | 
             
                logger.info(f"CUDA available. Using device: {torch.cuda.get_device_name(0)}")
         | 
| 388 | 
            +
             | 
| 389 | 
             
                # Parse and validate labels
         | 
| 390 | 
             
                labels = [label.strip() for label in args.labels.split(",")]
         | 
| 391 | 
             
                if len(labels) < 2:
         | 
|  | |
| 393 | 
             
                    sys.exit(1)
         | 
| 394 | 
             
                logger.info(f"Classification labels: {labels}")
         | 
| 395 |  | 
| 396 | 
            +
                # Parse label descriptions if provided
         | 
| 397 | 
            +
                label_descriptions = None
         | 
| 398 | 
            +
                if args.label_descriptions:
         | 
| 399 | 
            +
                    label_descriptions = parse_label_descriptions(args.label_descriptions)
         | 
| 400 | 
            +
                    logger.info("Label descriptions provided:")
         | 
| 401 | 
            +
                    for label, desc in label_descriptions.items():
         | 
| 402 | 
            +
                        logger.info(f"  {label}: {desc}")
         | 
| 403 | 
            +
             | 
| 404 | 
             
                # Load dataset
         | 
| 405 | 
             
                logger.info(f"Loading dataset: {args.input_dataset}")
         | 
| 406 | 
             
                try:
         | 
| 407 | 
             
                    dataset = load_dataset(args.input_dataset, split=args.split)
         | 
| 408 | 
            +
                    logger.info(f"Loaded {len(dataset)} samples from split '{args.split}'")
         | 
| 409 | 
            +
             | 
| 410 | 
            +
                    # Shuffle if requested
         | 
| 411 | 
            +
                    if args.shuffle:
         | 
| 412 | 
            +
                        logger.info(f"Shuffling dataset with seed {args.shuffle_seed}")
         | 
| 413 | 
            +
                        dataset = dataset.shuffle(seed=args.shuffle_seed)
         | 
| 414 | 
            +
             | 
| 415 | 
             
                    # Limit samples if specified
         | 
| 416 | 
             
                    if args.max_samples:
         | 
| 417 | 
             
                        dataset = dataset.select(range(min(args.max_samples, len(dataset))))
         | 
| 418 | 
             
                        logger.info(f"Limited dataset to {len(dataset)} samples")
         | 
| 419 | 
            +
                        if args.shuffle:
         | 
| 420 | 
            +
                            logger.info("Note: Samples were randomly selected due to shuffling")
         | 
| 421 | 
             
                except Exception as e:
         | 
| 422 | 
             
                    logger.error(f"Failed to load dataset: {e}")
         | 
| 423 | 
             
                    sys.exit(1)
         | 
| 424 | 
            +
             | 
    # Verify column exists
    if args.column not in dataset.column_names:
        logger.error(f"Column '{args.column}' not found in dataset.")
        logger.error(f"Available columns: {dataset.column_names}")
        sys.exit(1)

    # Extract texts
    texts = dataset[args.column]

    # Load tokenizer for chat template formatting
    logger.info(f"Loading tokenizer for {args.model}")
    try:
        tokenizer = AutoTokenizer.from_pretrained(args.model, trust_remote_code=True)
    except Exception as e:
        logger.error(f"Failed to load tokenizer: {e}")
        sys.exit(1)

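    # Illustrative sketch (assumption): prepare_prompts below is expected to wrap
    # each classification prompt with the model's chat template, roughly:
    #   messages = [{"role": "user", "content": prompt_text}]
    #   prompt = tokenizer.apply_chat_template(
    #       messages, tokenize=False, add_generation_prompt=True
    #   )
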
    # Initialize vLLM
    logger.info(f"Initializing vLLM with model: {args.model}")
    logger.info(f"Using guided decoding backend: {args.guided_backend}")
    try:
        ...  # LLM(...) construction elided in this diff hunk
    except Exception as e:
        logger.error(f"Failed to initialize vLLM: {e}")
        sys.exit(1)

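    # Note: the --guided-backend value logged above selects vLLM's structured
    # output engine; backends such as "outlines" or "lm-format-enforcer" are
    # common choices, though availability depends on the vLLM version.
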
    # Set up sampling parameters based on mode
    if args.enable_reasoning:
        # Reasoning mode: no guided decoding; a much larger token budget for thinking
        sampling_params = SamplingParams(
            temperature=args.temperature,
            max_tokens=args.max_tokens * 20,  # 20x more tokens for extensive reasoning
        )
        logger.info("Using reasoning mode - model will generate thinking traces with JSON output")
    else:
        # Structured output mode: guided decoding restricted to the label set
        guided_params = GuidedDecodingParams(choice=labels)
        sampling_params = SamplingParams(
            guided_decoding=guided_params,
            temperature=args.temperature,
            max_tokens=args.max_tokens,
        )
        logger.info("Using structured output with guided_choice - outputs guaranteed to be valid labels")

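    # For reference: guided_choice restricts decoding so the completion must be
    # exactly one of the label strings. In reasoning mode the model is instead
    # expected to think freely and finish with JSON along these lines (illustrative):
    #   {"label": "positive", "reasoning": "The review praises the acting..."}
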
    # Prepare all prompts
    logger.info("Preparing prompts for classification...")
    all_prompts, valid_indices = prepare_prompts(
        texts, labels, tokenizer, label_descriptions, args.enable_reasoning
    )

    if not all_prompts:
        logger.error("No valid texts found for classification.")
        sys.exit(1)

    logger.info(f"Prepared {len(all_prompts)} valid prompts out of {len(texts)} texts")

    # Let vLLM handle batching internally
    logger.info("Starting classification (vLLM will handle batching internally)...")

    try:
        # Generate all classifications at once - vLLM handles batching
        outputs = llm.generate(all_prompts, sampling_params)

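        # Note: each item in outputs lines up positionally with all_prompts;
        # the completion text is read from output.outputs[0].text.
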
        # Process outputs based on mode
        if args.enable_reasoning:
            # Reasoning mode: parse JSON and extract reasoning
            all_classifications = [None] * len(texts)
            all_reasoning = [None] * len(texts)
            # None = never generated (text skipped); True/False = parse outcome
            all_parsing_success = [None] * len(texts)

            for idx, output in enumerate(outputs):
                original_idx = valid_indices[idx]
                generated_text = output.outputs[0].text.strip()

                # Parse the reasoning output
                label, reasoning, success = parse_reasoning_output(generated_text, labels)

                all_classifications[original_idx] = label
                all_reasoning[original_idx] = reasoning
                all_parsing_success[original_idx] = success

                # Log first few examples
                if idx < 3:
                    logger.info(f"\nExample {idx + 1} output:")
                    logger.info(f"Raw output: {generated_text[:200]}...")
                    logger.info(f"Parsed label: {label}")
                    logger.info(f"Parsing success: {success}")

            # Count parsing statistics
            parsing_success_count = sum(1 for s in all_parsing_success if s)
            parsing_fail_count = sum(1 for s in all_parsing_success if s is not None and not s)
            logger.info("\nParsing statistics:")
            logger.info(
                f"  Successful: {parsing_success_count}/{len(valid_indices)} "
                f"({parsing_success_count / len(valid_indices) * 100:.1f}%)"
            )
            logger.info(
                f"  Failed: {parsing_fail_count}/{len(valid_indices)} "
                f"({parsing_fail_count / len(valid_indices) * 100:.1f}%)"
            )

            valid_texts = parsing_success_count
        else:
            # Structured output mode: the generated text is already a valid label
            all_classifications = [None] * len(texts)
            for idx, output in enumerate(outputs):
                original_idx = valid_indices[idx]
                generated_text = output.outputs[0].text.strip()
                all_classifications[original_idx] = generated_text

            valid_texts = len(valid_indices)

        # Count statistics
        total_texts = len(texts)

    except Exception as e:
        logger.error(f"Classification failed: {e}")
        sys.exit(1)

    # Add columns to dataset
    dataset = dataset.add_column("classification", all_classifications)

    if args.enable_reasoning:
        dataset = dataset.add_column("reasoning", all_reasoning)
        dataset = dataset.add_column("parsing_success", all_parsing_success)

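    # Dataset.add_column expects one value per row, which holds here because
    # each result list was preallocated to len(texts) with None placeholders.
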
    # Calculate statistics
    none_count = total_texts - valid_texts
    if none_count > 0:
        logger.warning(
            f"{none_count} texts were too short or invalid for classification"
        )

    # Show classification distribution
    label_counts = {label: all_classifications.count(label) for label in labels}

    # Count None values separately
    none_classifications = all_classifications.count(None)

    logger.info("Classification distribution:")
    for label, count in label_counts.items():
        percentage = count / total_texts * 100 if total_texts > 0 else 0
        logger.info(f"  {label}: {count} ({percentage:.1f}%)")

    if none_classifications > 0:
        none_percentage = none_classifications / total_texts * 100
        if args.enable_reasoning:
            logger.info(f"  Failed to parse: {none_classifications} ({none_percentage:.1f}%)")
        else:
            logger.info(f"  Invalid/Skipped: {none_classifications} ({none_percentage:.1f}%)")

    # Log success rate
    success_rate = (valid_texts / total_texts * 100) if total_texts > 0 else 0
    logger.info(f"Classification success rate: {success_rate:.1f}%")

    # Save to Hub (token already validated at start)
    logger.info(f"Pushing dataset to Hub: {args.output_dataset}")
    try:
        dataset.push_to_hub(
            args.output_dataset,
            token=token,
            commit_message=(
                f"Add classifications using {args.model} "
                f"{'with reasoning' if args.enable_reasoning else 'with structured outputs'}"
            ),
        )
        logger.info(
            f"Successfully pushed to: https://huggingface.co/datasets/{args.output_dataset}"
        )
    except Exception as e:
        logger.error(f"Failed to push to Hub: {e}")
        sys.exit(1)
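    # Once pushed, the classified dataset can be reloaded like any other Hub
    # dataset, e.g. (illustrative repo id): load_dataset("user/imdb-classified")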

if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("Example commands:")
        print("\n# Simple classification:")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset stanfordnlp/imdb \\")
        print("  --column text \\")
        print("  --labels 'positive,negative' \\")
        print("  --output-dataset user/imdb-classified")
        print("\n# With label descriptions:")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset user/support-tickets \\")
        print("  --column content \\")
        print("  --labels 'bug,feature,question' \\")
        print("  --label-descriptions 'bug:something is broken or not working,feature:request for new functionality,question:asking for help or clarification' \\")
        print("  --output-dataset user/tickets-classified")
        print("\n# With reasoning mode (thinking + JSON output):")
        print("uv run classify-dataset.py \\")
        print("  --input-dataset stanfordnlp/imdb \\")
        print("  --column text \\")
        print("  --labels 'positive,negative,neutral' \\")
        print("  --enable-reasoning \\")
        print("  --output-dataset user/imdb-reasoned")
        print("\n# HF Jobs example:")
        print("hf jobs uv run \\")
        print("  --flavor l4x1 \\")
        print("  --image vllm/vllm-openai:latest \\")
        # ... (remaining hf jobs arguments elided in this diff hunk)
        print("  --labels 'positive,negative' \\")
        print("  --output-dataset user/imdb-classified")
        sys.exit(0)

    main()

