Commit 5656dba · Parent(s): d240107

refactor dataset loading script (#1)

- refactor dataset loading script (beddd3c05f28fa231aba7cd154f5b366be0d5b05)

Co-authored-by: Daniel van Strien <davanstrien@users.noreply.huggingface.co>

- DocLayNet.py +89 -85
DocLayNet.py CHANGED

@@ -6,10 +6,10 @@ https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_datas
 import json
 import os
 import datasets
+import collections


 class COCOBuilderConfig(datasets.BuilderConfig):
-
     def __init__(self, name, splits, **kwargs):
         super().__init__(name, **kwargs)
         self.splits = splits
@@ -43,12 +43,10 @@ _LICENSE = "CDLA-Permissive-1.0"
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)

-# This script is supposed to work with local (downloaded) COCO dataset.
 _URLs = {
     "core": "https://codait-cos-dax.s3.us.cloud-object-storage.appdomain.cloud/dax-doclaynet/1.0.0/DocLayNet_core.zip",
 }

-
 # Name of the dataset usually match the script name with CamelCase instead of snake_case
 class COCODataset(datasets.GeneratorBasedBuilder):
     """An example dataset script to work with the local (downloaded) COCO dataset"""
@@ -57,28 +55,51 @@ class COCODataset(datasets.GeneratorBasedBuilder):

     BUILDER_CONFIG_CLASS = COCOBuilderConfig
     BUILDER_CONFIGS = [
-        COCOBuilderConfig(name=
+        COCOBuilderConfig(name="2022.08", splits=["train", "val", "test"]),
     ]
     DEFAULT_CONFIG_NAME = "2022.08"

     def _info(self):
-
-
-
+        features = datasets.Features(
+            {
+                "image_id": datasets.Value("int64"),
+                "image": datasets.Image(),
+                "width": datasets.Value("int32"),
+                "height": datasets.Value("int32"),
+                # Custom fields
+                "doc_category": datasets.Value(
+                    "string"
+                ),  # high-level document category
+                "collection": datasets.Value("string"),  # sub-collection name
+                "doc_name": datasets.Value("string"),  # original document filename
+                "page_no": datasets.Value("int64"),  # page number in original document
+            }
+        )
+        object_dict = {
+            "category_id": datasets.ClassLabel(
+                names=[
+                    "Caption",
+                    "Footnote",
+                    "Formula",
+                    "List-item",
+                    "Page-footer",
+                    "Page-header",
+                    "Picture",
+                    "Section-header",
+                    "Table",
+                    "Text",
+                    "Title",
+                ]
+            ),
+            "image_id": datasets.Value("string"),
             "id": datasets.Value("int64"),
-            "
-            "
-            "
-
-
-            "doc_category": datasets.Value("string"),  # high-level document category
-            "collection": datasets.Value("string"),  # sub-collection name
-            "doc_name": datasets.Value("string"),  # original document filename
-            "page_no": datasets.Value("int64"),  # page number in original document
-            # "precedence": datasets.Value("int64"),  # annotation order, non-zero in case of redundant double- or triple-annotation
+            "area": datasets.Value("int64"),
+            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+            "segmentation": [[datasets.Value("float32")]],
+            "iscrowd": datasets.Value("bool"),
+            "precedence": datasets.Value("int32"),
         }
-
-        features = datasets.Features(feature_dict)
+        features["objects"] = [object_dict]

         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
@@ -99,53 +120,41 @@ class COCODataset(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # data_dir = self.config.data_dir
-        # if not data_dir:
-        #     raise ValueError(
-        #         "This script is supposed to work with local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
-        #     )
-
-        # _DL_URLS = {
-        #     "train": os.path.join(data_dir, "train2017.zip"),
-        #     "val": os.path.join(data_dir, "val2017.zip"),
-        #     "test": os.path.join(data_dir, "test2017.zip"),
-        #     "annotations_trainval": os.path.join(data_dir, "annotations_trainval2017.zip"),
-        #     "image_info_test": os.path.join(data_dir, "image_info_test2017.zip"),
-        # }
         archive_path = dl_manager.download_and_extract(_URLs)
-        print("archive_path: ", archive_path)
-
         splits = []
         for split in self.config.splits:
-            if split ==
+            if split == "train":
                 dataset = datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(
+                        "json_path": os.path.join(
+                            archive_path["core"], "COCO", "train.json"
+                        ),
                         "image_dir": os.path.join(archive_path["core"], "PNG"),
                         "split": "train",
-                    }
+                    },
                 )
-            elif split in [
+            elif split in ["val", "valid", "validation", "dev"]:
                 dataset = datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(
+                        "json_path": os.path.join(
+                            archive_path["core"], "COCO", "val.json"
+                        ),
                         "image_dir": os.path.join(archive_path["core"], "PNG"),
                         "split": "val",
                     },
                 )
-            elif split ==
+            elif split == "test":
                 dataset = datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "json_path": os.path.join(
+                        "json_path": os.path.join(
+                            archive_path["core"], "COCO", "test.json"
+                        ),
                         "image_dir": os.path.join(archive_path["core"], "PNG"),
                         "split": "test",
                     },
@@ -154,53 +163,48 @@ class COCODataset(datasets.GeneratorBasedBuilder):
                 continue

             splits.append(dataset)
-
         return splits

     def _generate_examples(
         # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-        self,
+        self,
+        json_path,
+        image_dir,
+        split,
     ):
-        """
+        """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            annotations =
-
-            # build a dict of image_id ->
+        def _image_info_to_example(image_info, image_dir):
+            image = image_info["file_name"]
+            return {
+                "image_id": image_info["id"],
+                "image": os.path.join(image_dir, image),
+                "width": image_info["width"],
+                "height": image_info["height"],
+                "doc_category": image_info["doc_category"],
+                "collection": image_info["collection"],
+                "doc_name": image_info["doc_name"],
+                "page_no": image_info["page_no"],
+            }
+
+        with open(json_path, encoding="utf8") as f:
+            annotation_data = json.load(f)
+            images = annotation_data["images"]
+            annotations = annotation_data["annotations"]
+            image_id_to_annotations = collections.defaultdict(list)
             for annotation in annotations:
-
-                image_info = d[annotation["image_id"]]
-                annotation.update(image_info)
-                annotation["id"] = _id
-
-        entries = annotations
-
-        for id_, entry in enumerate(entries):
+                image_id_to_annotations[annotation["image_id"]].append(annotation)

-
-
-
-
-
-
-
-
-
-
-
+        for idx, image_info in enumerate(images):
+            example = _image_info_to_example(image_info, image_dir)
+            annotations = image_id_to_annotations[image_info["id"]]
+            objects = []
+            for annotation in annotations:
+                category_id = annotation["category_id"]  # Zero based counting
+                if category_id != -1:
+                    category_id = category_id - 1
+                annotation["category_id"] = category_id
+                objects.append(annotation)
+            example["objects"] = objects
+            yield idx, example
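
The heart of the refactor is the new _generate_examples: rather than yielding one flat entry per annotation, it groups the COCO annotations by image_id and yields one example per page image, shifting each category_id down by one so COCO's 1-based ids line up with the 0-based ClassLabel. A minimal standalone sketch of that grouping logic; the two-annotation `coco` dict below is made up for illustration and stands in for the real COCO/{train,val,test}.json files:

import collections

# Toy stand-in for the real DocLayNet COCO JSON (made-up values).
coco = {
    "images": [
        {"id": 1, "file_name": "page_0.png", "width": 1025, "height": 1025}
    ],
    "annotations": [
        {"image_id": 1, "id": 10, "category_id": 1, "bbox": [10.0, 20.0, 30.0, 40.0]},
        {"image_id": 1, "id": 11, "category_id": 9, "bbox": [0.0, 0.0, 5.0, 5.0]},
    ],
}

# Index annotations by image, as the refactored script does.
image_id_to_annotations = collections.defaultdict(list)
for annotation in coco["annotations"]:
    image_id_to_annotations[annotation["image_id"]].append(annotation)

for idx, image_info in enumerate(coco["images"]):
    objects = []
    for annotation in image_id_to_annotations[image_info["id"]]:
        # Ids in the JSON start at 1 while ClassLabel indices start at 0,
        # so shift down by one; -1 is left untouched as a sentinel.
        if annotation["category_id"] != -1:
            annotation["category_id"] -= 1
        objects.append(annotation)
    print(idx, image_info["file_name"], [o["category_id"] for o in objects])
    # -> 0 page_0.png [0, 8], i.e. "Caption" and "Table" in the ClassLabel names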
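
For reference, a hypothetical usage sketch of the refactored script through the standard datasets API. The repository id "ds4sd/DocLayNet" is an assumption (substitute whichever repo hosts DocLayNet.py), and newer datasets releases may additionally require trust_remote_code=True for script-backed datasets:

from datasets import load_dataset

# Repo id is an assumption; the "2022.08" config with train/val/test
# splits is the script's default.
dataset = load_dataset("ds4sd/DocLayNet")

example = dataset["train"][0]
print(example["doc_category"], example["doc_name"], example["page_no"])

# "objects" is a list of per-page COCO annotations; category_id indexes
# the 0-based ClassLabel names ("Caption", "Footnote", ..., "Title").
first = example["objects"][0]
print(first["category_id"], first["bbox"])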