Upload sea_madlad.py with huggingface_hub
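The commit title says the file was pushed with the huggingface_hub client. A minimal sketch of such an upload follows; the repo id, local path, and token handling below are placeholders and assumptions, not details recorded in this commit:

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token stored by `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="sea_madlad.py",       # local file to push
        path_in_repo="sea_madlad.py",          # destination path inside the repo
        repo_id="<namespace>/<repo-name>",     # placeholder repo id
        repo_type="dataset",                   # assumption: the target is a dataset repo
        commit_message="Upload sea_madlad.py with huggingface_hub",
    )

The same result can also be achieved from the command line with `huggingface-cli upload`.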
sea_madlad.py  ADDED  (+255 lines, -0)

@@ -0,0 +1,255 @@
"""
SEA Crowd Data Loader for SEA MADLAD.
"""

import gzip
import json
from typing import Dict, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks

_CITATION = r"""
@misc{kudugunta2023madlad400,
      title={MADLAD-400: A Multilingual And Document-Level Large Audited Dataset},
      author={Sneha Kudugunta and Isaac Caswell and Biao Zhang and Xavier Garcia and Christopher A. Choquette-Choo and Katherine Lee and Derrick Xin and Aditya Kusupati and Romi Stella and Ankur Bapna and Orhan Firat},
      year={2023},
      eprint={2309.04662},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

logger = datasets.logging.get_logger(__name__)

# this config is created for the SEACrowd dataloader
_LANG_CONFIG = {
    "ace": {"name": "Aceh", "source_subset": "ace"},
    "akb": {"name": "Batak Angkola", "source_subset": "akb"},
    "ban": {"name": "Bali", "source_subset": "ban"},
    "bbc": {"name": "Batak Toba", "source_subset": "bbc"},
    "bew": {"name": "Betawi", "source_subset": "bew"},
    "btx": {"name": "Batak Karo", "source_subset": "btx"},
    "ceb": {"name": "Cebuano", "source_subset": "ceb"},
    "fil": {"name": "Filipino", "source_subset": "fil"},
    "gor": {"name": "Gorontalo", "source_subset": "gor"},
    "hil": {"name": "Hiligaynon", "source_subset": "hil"},
    "iba": {"name": "Iban", "source_subset": "iba"},
    "ilo": {"name": "Ilocano", "source_subset": "ilo"},
    "ind": {"name": "Indonesian", "source_subset": "id"},
    "jav": {"name": "Javanese", "source_subset": "jv"},
    "kac": {"name": "Jingpho", "source_subset": "kac"},
    "khm": {"name": "Khmer", "source_subset": "km"},
    "kxd": {"name": "Brunei", "source_subset": "ms_Arab_BN"},
    "lao": {"name": "Lao", "source_subset": "lo"},
    "mad": {"name": "Madura", "source_subset": "mad"},
    "mak": {"name": "Makasar", "source_subset": "mak"},
    "meo": {"name": "Kedah Malay", "source_subset": "meo"},
    "min": {"name": "Minangkabau", "source_subset": "min"},
    "mkn": {"name": "Kupang Malay", "source_subset": "mkn"},
    "msa": {"name": "Malay", "source_subset": "ms"},
    "msi": {"name": "Sabah Malay", "source_subset": "msi"},
    "mya": {"name": "Burmese", "source_subset": "my"},
    "nij": {"name": "Ngaju", "source_subset": "nij"},
    "nut": {"name": "Nung", "source_subset": "nut"},
    "pag": {"name": "Pangasinan", "source_subset": "pag"},
    "shn": {"name": "Shan", "source_subset": "shn"},
    "sun": {"name": "Sunda", "source_subset": "su"},
    "tet": {"name": "Tetun", "source_subset": "tet"},
    "tha": {"name": "Thai", "source_subset": "th"},
    "vie": {"name": "Vietnamese", "source_subset": "vi"},
    "war": {"name": "Waray-Waray", "source_subset": "war"},
}

# this config is copied from the source dataloader,
# only using the `clean` values
_N_SHARDS_PER_SPLIT = {
    "ace": 1,
    "akb": 1,
    "ban": 1,
    "bbc": 1,
    "bew": 1,
    "btx": 1,
    "ceb": 1,
    "fil": 1,
    "gor": 1,
    "hil": 1,
    "iba": 1,
    "id": 18,
    "ilo": 1,
    "jv": 1,
    "kac": 1,
    "km": 1,
    "lo": 1,
    "mad": 1,
    "mak": 1,
    "meo": 1,
    "min": 1,
    "mkn": 1,
    "ms": 2,
    "ms_Arab_BN": 1,
    "msi": 1,
    "my": 1,
    "nij": 1,
    "nut": 1,
    "pag": 1,
    "shn": 1,
    "su": 1,
    "tet": 1,
    "th": 21,
    "vi": 32,
    "war": 1,
}

_LOCAL = False
_LANGUAGES = list(_LANG_CONFIG.keys())


_DATASETNAME = "sea_madlad"
_DESCRIPTION = r"""
    SEA MADLAD is a subset of MADLAD-400 (Multilingual Audited Dataset: Low-resource And Document-level), a document-level multilingual dataset based on Common Crawl.
    SEA MADLAD keeps only the SEA languages of the "clean" subset, which covers 36 languages indigenous to SEA out of 419 languages in total.
    As a result, some SEA language codes aren't available in this version because those languages were removed from the "clean" version during the MADLAD auditing process.
    MADLAD uses all snapshots of CommonCrawl available as of August 1, 2022.
    The primary advantage of this dataset over similar datasets is that it is more multilingual, it is audited and more highly filtered, and it is document-level.
    The main disadvantage is also its strength -- being more filtered, it may lack the recall needed for some applications.
"""

_HOMEPAGE = "https://huggingface.co/datasets/allenai/MADLAD-400"
_LICENSE = Licenses.CC_BY_4_0.value

_URL = "https://huggingface.co/datasets/allenai/MADLAD-400/resolve/ecd71297d60c1eb996cd3d7c44c60ad5b55adfc6/data/{language}/{language}_{split}_{index:04d}.jsonl.gz"
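# Worked example of the template above (illustrative only, not verified against the remote
# repository): for Indonesian the source subset is "id", so shard 0 of the "clean" split
# formats to ".../data/id/id_clean_0000.jsonl.gz".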
            +
             | 
| 127 | 
            +
            _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
         | 
| 128 | 
            +
            _SOURCE_VERSION = "1.0.0"
         | 
| 129 | 
            +
            _SEACROWD_VERSION = "2024.06.20"
         | 
| 130 | 
            +
             | 
| 131 | 
            +
            CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]
         | 
| 132 | 
            +
             | 
| 133 | 
            +
             | 
| 134 | 
            +
            def conform_init_config():
         | 
| 135 | 
            +
                """Assertion Function for Instantiated Configs"""
         | 
| 136 | 
            +
                if len(_LANGUAGES) == 0:
         | 
| 137 | 
            +
                    raise AssertionError("No Languages detected from config!")
         | 
| 138 | 
            +
                if len(CONFIG_SUFFIXES_FOR_TASK) != len(_SUPPORTED_TASKS):
         | 
| 139 | 
            +
                    raise AssertionError("Config prefixes don't matched in terms of `len` with `_SUPPORTED_TASKS`!")
         | 
| 140 | 
            +
                if len(CONFIG_SUFFIXES_FOR_TASK) == 0:
         | 
| 141 | 
            +
                    raise AssertionError("Config prefixes and `_SUPPORTED_TASKS` have `len` of 0!")
         | 
| 142 | 
            +
             | 
| 143 | 
            +
             | 
| 144 | 
            +
            conform_init_config()
         | 
| 145 | 
            +
             | 
| 146 | 
            +
             | 
| 147 | 
            +
            def construct_configs_on_langs(languages: list = None) -> List[SEACrowdConfig]:
         | 
| 148 | 
            +
                """
         | 
| 149 | 
            +
                The function `construct_configs` constructs a list of SEACrowdConfig objects based on the provided
         | 
| 150 | 
            +
                languages or a default language, and returns the list.
         | 
| 151 | 
            +
             | 
| 152 | 
            +
                input:
         | 
| 153 | 
            +
                    languages (list, default None): The `languages` parameter is a list that specifies the languages for which the
         | 
| 154 | 
            +
                    configurations need to be constructed. If no languages are provided (value=None), the first value in language config
         | 
| 155 | 
            +
                    will be used.
         | 
| 156 | 
            +
                output:
         | 
| 157 | 
            +
                    a list of `SEACrowdConfig` objects based on instantiated init variables
         | 
| 158 | 
            +
                """
         | 
| 159 | 
            +
             | 
| 160 | 
            +
                # set output var
         | 
| 161 | 
            +
                config_list = []
         | 
| 162 | 
            +
             | 
| 163 | 
            +
                # construct zipped arg for config instantiation
         | 
| 164 | 
            +
                TASKS_AND_CONFIG_SUFFIX_PAIRS = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))
         | 
| 165 | 
            +
             | 
| 166 | 
            +
                # implement source schema
         | 
| 167 | 
            +
                version, config_name_prefix = _SOURCE_VERSION, "source"
         | 
| 168 | 
            +
                config_list += [
         | 
| 169 | 
            +
                    SEACrowdConfig(
         | 
| 170 | 
            +
                        name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}",
         | 
| 171 | 
            +
                        version=datasets.Version(version),
         | 
| 172 | 
            +
                        description=f"{_DATASETNAME} {config_name_prefix} schema for language code {_LANG}",
         | 
| 173 | 
            +
                        schema=f"{config_name_prefix}",
         | 
| 174 | 
            +
                        subset_id=_LANG,
         | 
| 175 | 
            +
                    )
         | 
| 176 | 
            +
                    for _LANG in languages
         | 
| 177 | 
            +
                ]
         | 
| 178 | 
            +
             | 
| 179 | 
            +
                # implement SEACrowd schema
         | 
| 180 | 
            +
                version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
         | 
| 181 | 
            +
                for task_obj, config_name_suffix in TASKS_AND_CONFIG_SUFFIX_PAIRS:
         | 
| 182 | 
            +
                    config_list += [
         | 
| 183 | 
            +
                        SEACrowdConfig(
         | 
| 184 | 
            +
                            name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}_{config_name_suffix}",
         | 
| 185 | 
            +
                            version=datasets.Version(version),
         | 
| 186 | 
            +
                            description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
         | 
| 187 | 
            +
                            schema=f"{config_name_prefix}_{config_name_suffix}",
         | 
| 188 | 
            +
                            subset_id=_LANG,
         | 
| 189 | 
            +
                        )
         | 
| 190 | 
            +
                        for _LANG in languages
         | 
| 191 | 
            +
                    ]
         | 
| 192 | 
            +
                return config_list
         | 
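
# Illustration of the names produced above (inferred from the f-string patterns): for the
# language code "ind", the constructed configs are "sea_madlad_ind_source" and
# "sea_madlad_ind_seacrowd_ssp".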


class SEAMADLADDataset(datasets.GeneratorBasedBuilder):
    """SEA MADLAD dataset, subsetted from https://huggingface.co/datasets/allenai/MADLAD-400"""

    # get all schema w/o lang arg + get all schema w/ lang arg
    BUILDER_CONFIGS = construct_configs_on_langs(_LANGUAGES)

    def _info(self) -> datasets.DatasetInfo:
        _config_schema_name = self.config.schema
        logger.info(f"Received schema name: {self.config.schema}")
        # self-supervised training schema
        if _config_schema_name == "source":
            features = datasets.Features({"text": datasets.Value("string")})

        elif _config_schema_name == "seacrowd_ssp":
            features = schemas.ssp_features

        else:
            raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        # construct URLs from "lang", the "clean" split, and the shard "index" based on `_N_SHARDS_PER_SPLIT`
        _lang = _LANG_CONFIG[self.config.subset_id]["source_subset"]
        _split = "clean"
        _data_list = [_URL.format(language=_lang, split=_split, index=idx) for idx in range(_N_SHARDS_PER_SPLIT[_lang])]

        filepaths = dl_manager.download(_data_list)

        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths})]

    def _generate_examples(self, filepaths) -> Tuple[int, Dict]:
        _config_schema_name = self.config.schema

        # the id_ construction follows the source dataloader
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)

                        # for source schema
                        if _config_schema_name == "source":
                            yield id_, {colname: example[colname] for colname in self.info.features}

                        # for ssp schema
                        elif _config_schema_name == "seacrowd_ssp":
                            yield id_, {"id": id_, "text": example["text"]}

                        else:
                            raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")

                        id_ += 1
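
A dataloader script like this is normally consumed through `datasets.load_dataset` with one of the config names built above. A minimal usage sketch, assuming the script sits in the working directory and the `seacrowd` utilities it imports are installed:

    import datasets

    # "sea_madlad_ind_seacrowd_ssp" is one of the configs constructed by
    # construct_configs_on_langs(); any language code from _LANG_CONFIG follows the same pattern.
    dset = datasets.load_dataset(
        "sea_madlad.py",
        name="sea_madlad_ind_seacrowd_ssp",
        split="train",
        trust_remote_code=True,  # may be required on recent `datasets` releases to run a loading script
    )
    print(dset[0]["text"][:200])

Downloading the larger subsets (for example Thai or Vietnamese, with 21 and 32 shards respectively) pulls several gzipped JSONL files, so the first call can take a while.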

