evalstate committed on
Commit b7f1d29 · 1 Parent(s): e8aa09f

Add dataset_inspector.py from HF (referenced in scripts)

Files changed (1):
  1. dataset_inspector.py +416 -0
dataset_inspector.py ADDED
@@ -0,0 +1,416 @@
+ #!/usr/bin/env python3
+ # /// script
+ # dependencies = []
+ # ///
+ """
+ Dataset Format Inspector for TRL Training (LLM-Optimized Output)
+
+ Inspects Hugging Face datasets to determine TRL training compatibility.
+ Uses Datasets Server API for instant results - no dataset download needed!
+
+ ULTRA-EFFICIENT: Uses HF Datasets Server API - completes in <2 seconds.
+
+ Usage with HF Jobs:
+     hf_jobs("uv", {
+         "script": "https://huggingface.co/datasets/evalstate/trl-helpers/raw/main/dataset_inspector.py",
+         "script_args": ["--dataset", "your/dataset", "--split", "train"]
+     })
+ """
+
+ import argparse
+ import sys
+ import json
+ import urllib.request
+ import urllib.parse
+ import urllib.error  # explicit import; needed for the HTTPError handler below
+ from typing import List, Dict, Any, Optional
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description="Inspect dataset format for TRL training")
+     parser.add_argument("--dataset", type=str, required=True, help="Dataset name")
+     parser.add_argument("--split", type=str, default="train", help="Dataset split (default: train)")
+     parser.add_argument("--config", type=str, default="default", help="Dataset config name (default: default)")
+     parser.add_argument("--preview", type=int, default=150, help="Max chars per field preview (default: 150)")
+     parser.add_argument("--samples", type=int, default=5, help="Number of samples to fetch (default: 5)")
+     parser.add_argument("--json-output", action="store_true", help="Output as JSON")
+     return parser.parse_args()
+
+
+ def api_request(url: str) -> Optional[Dict]:
+     """Make API request to Datasets Server; returns None on 404."""
+     try:
+         with urllib.request.urlopen(url, timeout=10) as response:
+             return json.loads(response.read().decode())
+     except urllib.error.HTTPError as e:
+         if e.code == 404:
+             return None
+         raise Exception(f"API request failed: {e.code} {e.reason}")
+     except Exception as e:
+         raise Exception(f"API request failed: {str(e)}")
+
+
+ def get_splits(dataset: str) -> Optional[Dict]:
+     """Get available splits for dataset"""
+     url = f"https://datasets-server.huggingface.co/splits?dataset={urllib.parse.quote(dataset)}"
+     return api_request(url)
+
+
+ def get_rows(dataset: str, config: str, split: str, offset: int = 0, length: int = 5) -> Optional[Dict]:
+     """Get rows from dataset"""
+     url = (
+         f"https://datasets-server.huggingface.co/rows?dataset={urllib.parse.quote(dataset)}"
+         f"&config={urllib.parse.quote(config)}&split={urllib.parse.quote(split)}"
+         f"&offset={offset}&length={length}"
+     )
+     return api_request(url)
+
+
+ def find_columns(columns: List[str], patterns: List[str]) -> List[str]:
+     """Find columns matching patterns"""
+     return [c for c in columns if any(p in c.lower() for p in patterns)]
+
+
+ def check_sft_compatibility(columns: List[str]) -> Dict[str, Any]:
+     """Check SFT compatibility"""
+     has_messages = "messages" in columns
+     has_text = "text" in columns
+     has_prompt_completion = "prompt" in columns and "completion" in columns
+
+     ready = has_messages or has_text or has_prompt_completion
+
+     possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
+     possible_response = find_columns(columns, ["response", "completion", "output", "answer"])
+
+     return {
+         "ready": ready,
+         "reason": "messages" if has_messages else "text" if has_text else "prompt+completion" if has_prompt_completion else None,
+         "possible_prompt": possible_prompt[0] if possible_prompt else None,
+         "possible_response": possible_response[0] if possible_response else None,
+         "has_context": "context" in columns,
+     }
+
+
+ def check_dpo_compatibility(columns: List[str]) -> Dict[str, Any]:
+     """Check DPO compatibility"""
+     has_standard = "prompt" in columns and "chosen" in columns and "rejected" in columns
+
+     possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
+     possible_chosen = find_columns(columns, ["chosen", "preferred", "winner"])
+     possible_rejected = find_columns(columns, ["rejected", "dispreferred", "loser"])
+
+     can_map = bool(possible_prompt and possible_chosen and possible_rejected)
+
+     return {
+         "ready": has_standard,
+         "can_map": can_map,
+         "prompt_col": possible_prompt[0] if possible_prompt else None,
+         "chosen_col": possible_chosen[0] if possible_chosen else None,
+         "rejected_col": possible_rejected[0] if possible_rejected else None,
+     }
+
+
+ def check_grpo_compatibility(columns: List[str]) -> Dict[str, Any]:
+     """Check GRPO compatibility"""
+     has_prompt = "prompt" in columns
+     has_no_responses = "chosen" not in columns and "rejected" not in columns
+
+     possible_prompt = find_columns(columns, ["prompt", "instruction", "question", "input"])
+
+     return {
+         "ready": has_prompt and has_no_responses,
+         "can_map": bool(possible_prompt) and has_no_responses,
+         "prompt_col": possible_prompt[0] if possible_prompt else None,
+     }
+
+
+ def check_kto_compatibility(columns: List[str]) -> Dict[str, Any]:
+     """Check KTO compatibility"""
+     return {"ready": "prompt" in columns and "completion" in columns and "label" in columns}
+
+
+ def generate_mapping_code(method: str, info: Dict[str, Any]) -> Optional[str]:
+     """Generate mapping code for a training method"""
+     if method == "SFT":
+         if info["ready"]:
+             return None
+
+         prompt_col = info.get("possible_prompt")
+         response_col = info.get("possible_response")
+         has_context = info.get("has_context", False)
+
+         if not prompt_col:
+             return None
+
+         if has_context and response_col:
+             return f"""def format_for_sft(example):
+     text = f"Instruction: {{example['{prompt_col}']}}\\n\\n"
+     if example.get('context'):
+         text += f"Context: {{example['context']}}\\n\\n"
+     text += f"Response: {{example['{response_col}']}}"
+     return {{'text': text}}
+
+ dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
+         elif response_col:
+             return f"""def format_for_sft(example):
+     return {{'text': f"{{example['{prompt_col}']}}\\n\\n{{example['{response_col}']}}"}}
+
+ dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
+         else:
+             return f"""def format_for_sft(example):
+     return {{'text': example['{prompt_col}']}}
+
+ dataset = dataset.map(format_for_sft, remove_columns=dataset.column_names)"""
+
+     elif method == "DPO":
+         if info["ready"] or not info["can_map"]:
+             return None
+
+         return f"""def format_for_dpo(example):
+     return {{
+         'prompt': example['{info['prompt_col']}'],
+         'chosen': example['{info['chosen_col']}'],
+         'rejected': example['{info['rejected_col']}'],
+     }}
+
+ dataset = dataset.map(format_for_dpo, remove_columns=dataset.column_names)"""
+
+     elif method == "GRPO":
+         if info["ready"] or not info["can_map"]:
+             return None
+
+         return f"""def format_for_grpo(example):
+     return {{'prompt': example['{info['prompt_col']}']}}
+
+ dataset = dataset.map(format_for_grpo, remove_columns=dataset.column_names)"""
+
+     return None
+
+
+ def format_value_preview(value: Any, max_chars: int) -> str:
+     """Format value for preview"""
+     if value is None:
+         return "None"
+     elif isinstance(value, str):
+         return value[:max_chars] + ("..." if len(value) > max_chars else "")
+     elif isinstance(value, list):
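+         # A list of dicts is typically chat-style messages; show the item
+         # count and keys rather than the full content.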
+         if len(value) > 0 and isinstance(value[0], dict):
+             return f"[{len(value)} items] Keys: {list(value[0].keys())}"
+         preview = str(value)
+         return preview[:max_chars] + ("..." if len(preview) > max_chars else "")
+     else:
+         preview = str(value)
+         return preview[:max_chars] + ("..." if len(preview) > max_chars else "")
+
+
+ def main():
+     args = parse_args()
+
+     print("Fetching dataset info via Datasets Server API...")
+
+     try:
+         # Get splits info
+         splits_data = get_splits(args.dataset)
+         if not splits_data or "splits" not in splits_data:
+             print(f"ERROR: Could not fetch splits for dataset '{args.dataset}'")
+             print("  Dataset may not exist or is not accessible via Datasets Server API")
+             sys.exit(1)
+
+         # Find the right config
+         available_configs = set()
+         split_found = False
+         config_to_use = args.config
+
+         for split_info in splits_data["splits"]:
+             available_configs.add(split_info["config"])
+             if split_info["config"] == args.config and split_info["split"] == args.split:
+                 split_found = True
+
+         # If default config not found, try first available (sorted for determinism)
+         if not split_found and available_configs:
+             config_to_use = sorted(available_configs)[0]
+             print(f"Config '{args.config}' not found, trying '{config_to_use}'...")
+
+         # Get rows
+         rows_data = get_rows(args.dataset, config_to_use, args.split, offset=0, length=args.samples)
+
+         if not rows_data or "rows" not in rows_data:
+             print(f"ERROR: Could not fetch rows for dataset '{args.dataset}'")
+             print(f"  Split '{args.split}' may not exist")
+             print(f"  Available configs: {', '.join(sorted(available_configs))}")
+             sys.exit(1)
+
+         rows = rows_data["rows"]
+         if not rows:
+             print(f"ERROR: No rows found in split '{args.split}'")
+             sys.exit(1)
+
+         # Extract column info from first row
+         first_row = rows[0]["row"]
+         columns = list(first_row.keys())
+         features = rows_data.get("features", [])
+
+         # Get total count if available
+         total_examples = "Unknown"
+         for split_info in splits_data["splits"]:
+             if split_info["config"] == config_to_use and split_info["split"] == args.split:
+                 num = split_info.get("num_examples")
+                 total_examples = f"{num:,}" if isinstance(num, int) else "Unknown"
+                 break
+
+     except Exception as e:
+         print(f"ERROR: {str(e)}")
+         sys.exit(1)
+
+     # Run compatibility checks
+     sft_info = check_sft_compatibility(columns)
+     dpo_info = check_dpo_compatibility(columns)
+     grpo_info = check_grpo_compatibility(columns)
+     kto_info = check_kto_compatibility(columns)
+
+     # Determine recommended methods
+     recommended = []
+     if sft_info["ready"]:
+         recommended.append("SFT")
+     elif sft_info["possible_prompt"]:
+         recommended.append("SFT (needs mapping)")
+
+     if dpo_info["ready"]:
+         recommended.append("DPO")
+     elif dpo_info["can_map"]:
+         recommended.append("DPO (needs mapping)")
+
+     if grpo_info["ready"]:
+         recommended.append("GRPO")
+     elif grpo_info["can_map"]:
+         recommended.append("GRPO (needs mapping)")
+
+     if kto_info["ready"]:
+         recommended.append("KTO")
+
+     # JSON output mode
+     if args.json_output:
+         result = {
+             "dataset": args.dataset,
+             "config": config_to_use,
+             "split": args.split,
+             "total_examples": total_examples,
+             "columns": columns,
+             "features": [{"name": f["name"], "type": f["type"]} for f in features] if features else [],
+             "compatibility": {
+                 "SFT": sft_info,
+                 "DPO": dpo_info,
+                 "GRPO": grpo_info,
+                 "KTO": kto_info,
+             },
+             "recommended_methods": recommended,
+         }
+         print(json.dumps(result, indent=2))
+         sys.exit(0)
+
+     # Human-readable output optimized for LLM parsing
+     print("=" * 80)
+     print("DATASET INSPECTION RESULTS")
+     print("=" * 80)
+
+     print(f"\nDataset: {args.dataset}")
+     print(f"Config: {config_to_use}")
+     print(f"Split: {args.split}")
+     print(f"Total examples: {total_examples}")
+     print(f"Samples fetched: {len(rows)}")
+
+     print(f"\n{'COLUMNS':-<80}")
+     if features:
+         for feature in features:
+             print(f"  {feature['name']}: {feature['type']}")
+     else:
+         for col in columns:
+             print(f"  {col}: (type info not available)")
+
+     print(f"\n{'EXAMPLE DATA':-<80}")
+     example = first_row
+     for col in columns:
+         value = example.get(col)
+         display = format_value_preview(value, args.preview)
+         print(f"\n{col}:")
+         print(f"  {display}")
+
+     print(f"\n{'TRAINING METHOD COMPATIBILITY':-<80}")
+
+     # SFT
+     print(f"\n[SFT] {'✓ READY' if sft_info['ready'] else '✗ NEEDS MAPPING'}")
+     if sft_info["ready"]:
+         print(f"  Reason: Dataset has '{sft_info['reason']}' field")
+         print("  Action: Use directly with SFTTrainer")
+     elif sft_info["possible_prompt"]:
+         print(f"  Detected: prompt='{sft_info['possible_prompt']}' response='{sft_info['possible_response']}'")
+         print("  Action: Apply mapping code (see below)")
+     else:
+         print("  Status: Cannot determine mapping - manual inspection needed")
+
+     # DPO
+     print(f"\n[DPO] {'✓ READY' if dpo_info['ready'] else '✗ NEEDS MAPPING' if dpo_info['can_map'] else '✗ INCOMPATIBLE'}")
+     if dpo_info["ready"]:
+         print("  Reason: Dataset has 'prompt', 'chosen', 'rejected' fields")
+         print("  Action: Use directly with DPOTrainer")
+     elif dpo_info["can_map"]:
+         print(f"  Detected: prompt='{dpo_info['prompt_col']}' chosen='{dpo_info['chosen_col']}' rejected='{dpo_info['rejected_col']}'")
+         print("  Action: Apply mapping code (see below)")
+     else:
+         print("  Status: Missing required fields (prompt + chosen + rejected)")
+
+     # GRPO
+     print(f"\n[GRPO] {'✓ READY' if grpo_info['ready'] else '✗ NEEDS MAPPING' if grpo_info['can_map'] else '✗ INCOMPATIBLE'}")
+     if grpo_info["ready"]:
+         print("  Reason: Dataset has 'prompt' field")
+         print("  Action: Use directly with GRPOTrainer")
+     elif grpo_info["can_map"]:
+         print(f"  Detected: prompt='{grpo_info['prompt_col']}'")
+         print("  Action: Apply mapping code (see below)")
+     else:
+         print("  Status: Missing prompt field")
+
+     # KTO
+     print(f"\n[KTO] {'✓ READY' if kto_info['ready'] else '✗ INCOMPATIBLE'}")
+     if kto_info["ready"]:
+         print("  Reason: Dataset has 'prompt', 'completion', 'label' fields")
+         print("  Action: Use directly with KTOTrainer")
+     else:
+         print("  Status: Missing required fields (prompt + completion + label)")
+
+     # Mapping code
+     print(f"\n{'MAPPING CODE (if needed)':-<80}")
+
+     mapping_needed = False
+
+     sft_mapping = generate_mapping_code("SFT", sft_info)
+     if sft_mapping:
+         print("\n# For SFT Training:")
+         print(sft_mapping)
+         mapping_needed = True
+
+     dpo_mapping = generate_mapping_code("DPO", dpo_info)
+     if dpo_mapping:
+         print("\n# For DPO Training:")
+         print(dpo_mapping)
+         mapping_needed = True
+
+     grpo_mapping = generate_mapping_code("GRPO", grpo_info)
+     if grpo_mapping:
+         print("\n# For GRPO Training:")
+         print(grpo_mapping)
+         mapping_needed = True
+
+     if not mapping_needed:
+         print("\nNo mapping needed - dataset is ready for training!")
+
+     print(f"\n{'SUMMARY':-<80}")
+     print(f"Recommended training methods: {', '.join(recommended) if recommended else 'None (dataset needs formatting)'}")
+     print("\nNote: Used Datasets Server API (instant, no download required)")
+
+     print("\n" + "=" * 80)
+     sys.exit(0)
+
+
+ if __name__ == "__main__":
+     try:
+         main()
+     except KeyboardInterrupt:
+         sys.exit(0)
+     except Exception as e:
+         print(f"ERROR: {e}", file=sys.stderr)
+         sys.exit(1)