						|  | """Cleaned Indonesian split of the KoPI corpus.""" | 
					
						
						|  | import json | 
					
						
						|  | import glob | 
					
						
						|  | import gzip | 
					
						
						|  | from posixpath import split | 
					
						
						|  | import textwrap | 
					
						
						|  | import datasets | 
					
						
						|  | import zstandard as zstd | 
					
						
						|  | logger = datasets.logging.get_logger(__name__) | 
					
						
_CITATION = """
"""

_DESCRIPTION = """\
"""

# Each language ships in three cleaning levels: the raw crawl ('raw'),
# deduplicated ('dedup'), and near-deduplicated ('neardup').
_TYPE = ['raw', 'dedup', 'neardup']

_CONF_LANG = ['ace_Latn', 'ban_Latn', 'bjn_Latn', 'ind_Latn', 'jav_Latn', 'min_Latn', 'sun_Latn']

# One builder config per (language, cleaning level) pair, e.g. 'ind_Latn-dedup'.
_CONFIGS = []
for lang in _CONF_LANG:
    for variant in _TYPE:
        _CONFIGS.append(lang + '-' + variant)

# The 'all-*' configs concatenate every language for a given cleaning level.
_ALL_CONFIG = ["all-raw", "all-dedup", "all-neardup"] + _CONFIGS

_HOMEPAGE = "https://huggingface.co/datasets/munggok/KoPI-NLLB"

_LICENSE = "ODC_C"

_BASE_URL = 'https://huggingface.co/datasets/munggok/KoPI-NLLB/resolve/main/{tipe}/{lang}.json.zst'
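# For illustration, the 'ind_Latn-dedup' config resolves to:
#   https://huggingface.co/datasets/munggok/KoPI-NLLB/resolve/main/dedup/ind_Latn.json.zst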
					
						
def kopi_nllb_constructor(nam):
    return KoPINLLBConfig(
        name=nam,
        version=datasets.Version("1.0.0"),
    )
					
						
class KoPINLLBConfig(datasets.BuilderConfig):
    """BuilderConfig for the Clean KoPI corpus."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Clean KoPI corpus.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
					
						
class KoPINLLB(datasets.GeneratorBasedBuilder):
    """KoPI corpus."""

    BUILDER_CONFIGS = [kopi_nllb_constructor(m) for m in _ALL_CONFIG]
					
						
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "score": datasets.Value("float32"),
                    "source": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Config names are '<lang>-<type>' (e.g. 'ind_Latn-dedup'); the
        # 'all-<type>' configs expand to every language for that type.
        lang, tipe = self.config.name.split("-")
        if lang == "all":
            train = [_BASE_URL.format(tipe=tipe, lang=m) for m in _CONF_LANG]
        else:
            train = [_BASE_URL.format(tipe=tipe, lang=lang)]
        train_downloaded_files = dl_manager.download(train)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files})
        ]
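
    # Only a train split is generated: the repository stores exactly one
    # shard per (type, language) pair (see _BASE_URL), so there are no
    # validation or test files to expose.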
					
						
    def _generate_examples(self, filepaths):
        """Yields examples in raw (text) form by iterating over all the files."""
        id_ = 0
        for filepath in filepaths:
            logger.info(f"Generating examples from {filepath}")
            # Shards are zstandard-compressed JSON Lines files; decompress on
            # the fly and parse one JSON record per line.
            with zstd.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, {
                            "text": example["text"],
                            "url": example["url"],
                            "source": example["source"],
                            "score": float(example["score"]),
                        }
                        id_ += 1
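

# A minimal usage sketch, not part of the loader itself. It assumes this
# script is published on the Hub as "munggok/KoPI-NLLB" (see _HOMEPAGE) and
# picks the 'ind_Latn-dedup' config from _ALL_CONFIG; streaming avoids
# downloading a whole shard just to inspect a record.
if __name__ == "__main__":
    stream = datasets.load_dataset(
        "munggok/KoPI-NLLB", "ind_Latn-dedup", split="train", streaming=True
    )
    first = next(iter(stream))
    print(first["url"], first["score"])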