Stop iterating through the dataset once max_samples is reached, and read only the filter columns (path and license) first, loading the remaining columns afterwards for rows that pass the filters.

#7
by KT313 - opened
Files changed (1) hide show
  1. github-code-clean.py +68 -25
github-code-clean.py CHANGED
@@ -93,12 +93,12 @@ _LICENSE_CONFIGS = ["all"] + _LICENSES
93
  class GithubCodeConfig(datasets.BuilderConfig):
94
  """BuilderConfig for the GitHub Code dataset."""
95
 
96
- def __init__(self, *args, languages=["all"], licenses=["all"], **kwargs):
97
  """BuilderConfig for the GitHub Code dataset.
98
-
99
  Args:
100
  languages (:obj:`List[str]`): List of languages to load.
101
  licenses (:obj:`List[str]`): List of licenses to load.
 
102
  **kwargs: keyword arguments forwarded to super.
103
  """
104
  super().__init__(
@@ -127,7 +127,7 @@ class GithubCodeConfig(datasets.BuilderConfig):
127
 
128
  self.languages = set(languages)
129
  self.licenses = set(licenses)
130
-
131
 
132
 
133
  class GithubCode(datasets.GeneratorBasedBuilder):
@@ -174,32 +174,75 @@ class GithubCode(datasets.GeneratorBasedBuilder):
174
 
175
  def _generate_examples(self, files):
176
  key = 0
 
 
 
177
  for file_idx, file in enumerate(files):
178
- with open(file, "rb") as f:
179
- parquet_file = pq.ParquetFile(f)
180
- for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
181
- pa_table = pa.Table.from_batches([record_batch])
182
- for row_index in range(pa_table.num_rows):
183
- row = pa_table.slice(row_index, 1).to_pydict()
184
-
185
- lang = lang_from_name(row['path'][0])
186
- license = row["license"][0]
187
-
188
- if self.config.filter_languages and not lang in self.config.languages:
189
- continue
190
- if self.config.filter_licenses and not license in self.config.licenses:
191
- continue
192
-
193
- yield key, {"code": row['code'][0],
194
- "repo_name": row['repo_name'][0],
195
- "path": row['path'][0],
196
- "license": license,
197
- "language": lang,
198
- "size": int(row['size'][0])}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
  key += 1
 
 
 
 
200
 
201
 
202
  def lang_from_name(name):
203
  for extension in _EXTENSION_TO_LANG:
204
  if name.endswith(extension):
205
- return _EXTENSION_TO_LANG[extension]
 
 
93
  class GithubCodeConfig(datasets.BuilderConfig):
94
  """BuilderConfig for the GitHub Code dataset."""
95
 
96
+ def __init__(self, *args, languages=["all"], licenses=["all"], max_samples=None, **kwargs):
97
  """BuilderConfig for the GitHub Code dataset.
 
98
  Args:
99
  languages (:obj:`List[str]`): List of languages to load.
100
  licenses (:obj:`List[str]`): List of licenses to load.
101
+ max_samples (:obj:`int`, optional): Maximum number of samples to generate (for early stopping).
102
  **kwargs: keyword arguments forwarded to super.
103
  """
104
  super().__init__(
 
127
 
128
  self.languages = set(languages)
129
  self.licenses = set(licenses)
130
+ self.max_samples = max_samples
131
 
132
 
133
  class GithubCode(datasets.GeneratorBasedBuilder):
 
174
 
175
  def _generate_examples(self, files):
176
  key = 0
177
+ yielded_count = 0
178
+ max_samples = self.config.max_samples
179
+
180
  for file_idx, file in enumerate(files):
181
+ # Early stopping at file level
182
+ if max_samples is not None and yielded_count >= max_samples:
183
+ return
184
+
185
+ parquet_file = pq.ParquetFile(file)
186
+
187
+ # Process each row group separately (Parquet internal chunking)
188
+ for rg_idx in range(parquet_file.num_row_groups):
189
+ # Early stopping at row group level
190
+ if max_samples is not None and yielded_count >= max_samples:
191
+ return
192
+
193
+ # PASS 1: Read ONLY filter columns from this row group
194
+ filter_table = parquet_file.read_row_group(rg_idx, columns=['path', 'license'])
195
+
196
+ paths = filter_table['path'].to_pylist()
197
+ licenses = filter_table['license'].to_pylist()
198
+
199
+ # Find matching indices within this row group
200
+ matching_indices = []
201
+ matching_langs = []
202
+
203
+ for row_index in range(len(paths)):
204
+ if max_samples is not None and yielded_count + len(matching_indices) >= max_samples:
205
+ break
206
+
207
+ lang = lang_from_name(paths[row_index])
208
+ license = licenses[row_index]
209
+
210
+ if self.config.filter_languages and lang not in self.config.languages:
211
+ continue
212
+ if self.config.filter_licenses and license not in self.config.licenses:
213
+ continue
214
+
215
+ matching_indices.append(row_index)
216
+ matching_langs.append(lang)
217
+
218
+ # PASS 2: Read full row group ONLY if there are matches
219
+ if matching_indices:
220
+ # Now read ALL columns for this row group
221
+ full_table = parquet_file.read_row_group(rg_idx)
222
+
223
+ # Extract only matching rows
224
+ filtered_table = full_table.take(matching_indices)
225
+ batch_dict = filtered_table.to_pydict()
226
+
227
+ # Yield all matching rows
228
+ for i in range(len(matching_indices)):
229
+ yield key, {
230
+ "code": batch_dict['code'][i],
231
+ "repo_name": batch_dict['repo_name'][i],
232
+ "path": batch_dict['path'][i],
233
+ "license": batch_dict['license'][i],
234
+ "language": matching_langs[i],
235
+ "size": int(batch_dict['size'][i])
236
+ }
237
  key += 1
238
+ yielded_count += 1
239
+
240
+ if max_samples is not None and yielded_count >= max_samples:
241
+ return
242
 
243
 
244
def lang_from_name(name):
    """Return the language mapped to *name*'s file extension, or None."""
    # First matching extension (in _EXTENSION_TO_LANG order) wins.
    matches = (
        lang
        for extension, lang in _EXTENSION_TO_LANG.items()
        if name.endswith(extension)
    )
    return next(matches, None)