KT313 committed
Commit 960fa84 · verified · 1 parent: c48d40f

Stop iterating through the dataset after max_samples is reached, and read the language-filter columns first


Saves a lot of time when generating the train split, e.g. if you only need ~100k samples from the Python subset.
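
For example, a capped load could look like the sketch below (a minimal sketch, assuming the script is consumed via datasets.load_dataset with trust_remote_code=True and that the repo id is codeparrot/github-code-clean; extra kwargs like languages and max_samples are forwarded to GithubCodeConfig, as usual for script-based datasets):

    from datasets import load_dataset

    # Hypothetical usage: generation stops once 100k matching samples
    # have been yielded instead of scanning every parquet file.
    ds = load_dataset(
        "codeparrot/github-code-clean",  # assumed repo id for this script
        languages=["Python"],
        max_samples=100_000,
        split="train",
        trust_remote_code=True,  # required to run the loading script
    )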

Files changed (1)
  github-code-clean.py  +68 -41
github-code-clean.py CHANGED
@@ -1,19 +1,3 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""GitHub Code clean dataset."""
-
 import os
 
 import pyarrow as pa
@@ -93,12 +77,12 @@ _LICENSE_CONFIGS = ["all"] + _LICENSES
 class GithubCodeConfig(datasets.BuilderConfig):
     """BuilderConfig for the GitHub Code dataset."""
 
-    def __init__(self, *args, languages=["all"], licenses=["all"], **kwargs):
+    def __init__(self, *args, languages=["all"], licenses=["all"], max_samples=None, **kwargs):
         """BuilderConfig for the GitHub Code dataset.
-
         Args:
             languages (:obj:`List[str]`): List of languages to load.
             licenses (:obj:`List[str]`): List of licenses to load.
+            max_samples (:obj:`int`, optional): Maximum number of samples to generate (for early stopping).
             **kwargs: keyword arguments forwarded to super.
         """
         super().__init__(
@@ -127,7 +111,7 @@ class GithubCodeConfig(datasets.BuilderConfig):
 
         self.languages = set(languages)
         self.licenses = set(licenses)
-
+        self.max_samples = max_samples
 
 
 class GithubCode(datasets.GeneratorBasedBuilder):
@@ -174,32 +158,75 @@ class GithubCode(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, files):
         key = 0
+        yielded_count = 0
+        max_samples = self.config.max_samples
+
         for file_idx, file in enumerate(files):
-            with open(file, "rb") as f:
-                parquet_file = pq.ParquetFile(f)
-                for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
-                    pa_table = pa.Table.from_batches([record_batch])
-                    for row_index in range(pa_table.num_rows):
-                        row = pa_table.slice(row_index, 1).to_pydict()
-
-                        lang = lang_from_name(row['path'][0])
-                        license = row["license"][0]
-
-                        if self.config.filter_languages and not lang in self.config.languages:
-                            continue
-                        if self.config.filter_licenses and not license in self.config.licenses:
-                            continue
-
-                        yield key, {"code": row['code'][0],
-                                    "repo_name": row['repo_name'][0],
-                                    "path": row['path'][0],
-                                    "license": license,
-                                    "language": lang,
-                                    "size": int(row['size'][0])}
+            # Early stopping at file level
+            if max_samples is not None and yielded_count >= max_samples:
+                return
+
+            parquet_file = pq.ParquetFile(file)
+
+            # Process each row group separately (Parquet internal chunking)
+            for rg_idx in range(parquet_file.num_row_groups):
+                # Early stopping at row group level
+                if max_samples is not None and yielded_count >= max_samples:
+                    return
+
+                # PASS 1: Read ONLY filter columns from this row group
+                filter_table = parquet_file.read_row_group(rg_idx, columns=['path', 'license'])
+
+                paths = filter_table['path'].to_pylist()
+                licenses = filter_table['license'].to_pylist()
+
+                # Find matching indices within this row group
+                matching_indices = []
+                matching_langs = []
+
+                for row_index in range(len(paths)):
+                    if max_samples is not None and yielded_count + len(matching_indices) >= max_samples:
+                        break
+
+                    lang = lang_from_name(paths[row_index])
+                    license = licenses[row_index]
+
+                    if self.config.filter_languages and lang not in self.config.languages:
+                        continue
+                    if self.config.filter_licenses and license not in self.config.licenses:
+                        continue
+
+                    matching_indices.append(row_index)
+                    matching_langs.append(lang)
+
+                # PASS 2: Read full row group ONLY if there are matches
+                if matching_indices:
+                    # Now read ALL columns for this row group
+                    full_table = parquet_file.read_row_group(rg_idx)
+
+                    # Extract only matching rows
+                    filtered_table = full_table.take(matching_indices)
+                    batch_dict = filtered_table.to_pydict()
+
+                    # Yield all matching rows
+                    for i in range(len(matching_indices)):
+                        yield key, {
+                            "code": batch_dict['code'][i],
+                            "repo_name": batch_dict['repo_name'][i],
+                            "path": batch_dict['path'][i],
+                            "license": batch_dict['license'][i],
+                            "language": matching_langs[i],
+                            "size": int(batch_dict['size'][i])
+                        }
                         key += 1
+                        yielded_count += 1
+
+                if max_samples is not None and yielded_count >= max_samples:
+                    return
 
 
 def lang_from_name(name):
     for extension in _EXTENSION_TO_LANG:
         if name.endswith(extension):
-            return _EXTENSION_TO_LANG[extension]
+            return _EXTENSION_TO_LANG[extension]
+    return None
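
The speed-up comes from the two-pass, row-group-level read in the new _generate_examples: Parquet is columnar, so read_row_group(rg_idx, columns=[...]) decodes only the requested columns, and the heavy code column is deserialized only for row groups that contain at least one match. A standalone sketch of the same pattern (hypothetical file path and license filter, not part of the commit):

    import pyarrow.parquet as pq

    def matching_rows(path, wanted_license="mit"):
        """Yield full rows whose 'license' matches, reading cheap columns first."""
        parquet_file = pq.ParquetFile(path)
        for rg_idx in range(parquet_file.num_row_groups):
            # Pass 1: decode only the small filter column.
            licenses = parquet_file.read_row_group(rg_idx, columns=["license"])
            matches = [i for i, lic in enumerate(licenses["license"].to_pylist())
                       if lic == wanted_license]
            if not matches:
                continue  # the large 'code' column is never decoded for this row group
            # Pass 2: decode all columns, keep only the matching rows.
            yield from parquet_file.read_row_group(rg_idx).take(matches).to_pylist()

If most row groups contain no matches (e.g. a rare language filter), pass 2 is skipped entirely for them, which is where most of the time is saved.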