Convert dataset to Parquet #5
by SaylorTwift (HF Staff) - opened
- README.md +10 -4
- mkqa.py +0 -149
- mkqa/train-00000-of-00001.parquet +3 -0
    	
README.md CHANGED

@@ -45,6 +45,7 @@ task_ids:
 paperswithcode_id: mkqa
 pretty_name: Multilingual Knowledge Questions and Answers
 dataset_info:
+  config_name: mkqa
   features:
   - name: example_id
     dtype: string
@@ -626,13 +627,18 @@ dataset_info:
         dtype: string
       - name: aliases
         list: string
-  config_name: mkqa
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 35957889
     num_examples: 10000
-  download_size:
-  dataset_size:
+  download_size: 19871622
+  dataset_size: 35957889
+configs:
+- config_name: mkqa
+  data_files:
+  - split: train
+    path: mkqa/train-*
+  default: true
 ---
 
 # Dataset Card for MKQA: Multilingual Knowledge Questions & Answers
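With the new `configs:` entry pointing `data_files` at `mkqa/train-*`, the dataset is served directly from Parquet and `load_dataset` no longer needs to execute a loading script. A minimal sketch of loading it after this change, assuming the repository id is the canonical `mkqa` dataset on the Hub (substitute the actual repo id if different):

    from datasets import load_dataset

    # "mkqa" is the only config and is marked default: true, so no config name is needed
    ds = load_dataset("mkqa", split="train")

    print(ds[0]["query"])           # original English query
    print(ds[0]["queries"]["de"])   # its German translation
    print(ds[0]["answers"]["en"])   # list of answer dicts: type, entity, text, aliases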
    	
mkqa.py DELETED

@@ -1,149 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""MKQA: Multilingual Knowledge Questions & Answers"""
-
-
-import json
-
-import datasets
-
-
-_CITATION = """\
-@misc{mkqa,
-    title = {MKQA: A Linguistically Diverse Benchmark for Multilingual Open Domain Question Answering},
-    author = {Shayne Longpre and Yi Lu and Joachim Daiber},
-    year = {2020},
-    URL = {https://arxiv.org/pdf/2007.15207.pdf}
-}
-"""
-
-_DESCRIPTION = """\
-We introduce MKQA, an open-domain question answering evaluation set comprising 10k question-answer pairs sampled from the Google Natural Questions dataset, aligned across 26 typologically diverse languages (260k question-answer pairs in total). For each query we collected new passage-independent answers. These queries and answers were then human translated into 25 Non-English languages.
-"""
-_HOMEPAGE = "https://github.com/apple/ml-mkqa"
-_LICENSE = "CC BY-SA 3.0"
-
-
-_URLS = {"train": "https://github.com/apple/ml-mkqa/raw/main/dataset/mkqa.jsonl.gz"}
-
-
-class Mkqa(datasets.GeneratorBasedBuilder):
-    """MKQA dataset"""
-
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="mkqa",
-            version=VERSION,
-            description=_DESCRIPTION,
-        ),
-    ]
-
-    def _info(self):
-        langs = [
-            "ar",
-            "da",
-            "de",
-            "en",
-            "es",
-            "fi",
-            "fr",
-            "he",
-            "hu",
-            "it",
-            "ja",
-            "ko",
-            "km",
-            "ms",
-            "nl",
-            "no",
-            "pl",
-            "pt",
-            "ru",
-            "sv",
-            "th",
-            "tr",
-            "vi",
-            "zh_cn",
-            "zh_hk",
-            "zh_tw",
-        ]
-
-        # Preferring list type instead of datasets.Sequence
-        queries_features = {lan: datasets.Value("string") for lan in langs}
-        answer_feature = [
-            {
-                "type": datasets.ClassLabel(
-                    names=[
-                        "entity",
-                        "long_answer",
-                        "unanswerable",
-                        "date",
-                        "number",
-                        "number_with_unit",
-                        "short_phrase",
-                        "binary",
-                    ]
-                ),
-                "entity": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "aliases": [datasets.Value("string")],
-            }
-        ]
-        answer_features = {lan: answer_feature for lan in langs}
-
-        features = datasets.Features(
-            {
-                "example_id": datasets.Value("string"),
-                "queries": queries_features,
-                "query": datasets.Value("string"),
-                "answers": answer_features,
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # download and extract URLs
-        urls_to_download = _URLS
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            for row in f:
-                data = json.loads(row)
-                data["example_id"] = str(data["example_id"])
-                id_ = data["example_id"]
-                for language in data["answers"].keys():
-                    # Add default values for possible missing keys
-                    for a in data["answers"][language]:
-                        if "aliases" not in a:
-                            a["aliases"] = []
-                        if "entity" not in a:
-                            a["entity"] = ""
-
-                yield id_, data
    	
mkqa/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa5d4e6685777784c1dda9b89066eb051b5199a1b6ce6232f9adb24ff62703bd
+size 19871622
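For reference, a conversion like this can be reproduced locally roughly as follows. This is only a sketch of the general workflow, not necessarily how the Parquet shard in this PR was generated; the repository id and the need for trust_remote_code are assumptions that depend on the datasets version in use.

    from datasets import load_dataset

    # Build the dataset once via the (now deleted) script-based builder;
    # recent versions of `datasets` require trust_remote_code=True for loading scripts.
    ds = load_dataset("mkqa", split="train", trust_remote_code=True)

    # Write a single Parquet shard matching the layout referenced by the new `configs:` metadata.
    ds.to_parquet("mkqa/train-00000-of-00001.parquet")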
