nyanko7 commited on
Commit
1fb4b9e
·
1 Parent(s): c7804c9

Create coco.py

Browse files
Files changed (1) hide show
  1. coco.py +241 -0
coco.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """COCO"""
16
+ import json
17
+ import os
18
+ from pathlib import Path
19
+
20
+ import datasets
21
+
22
+
23
+ _CITATION = """
24
+ @article{DBLP:journals/corr/LinMBHPRDZ14,
25
+ author = {Tsung{-}Yi Lin and
26
+ Michael Maire and
27
+ Serge J. Belongie and
28
+ Lubomir D. Bourdev and
29
+ Ross B. Girshick and
30
+ James Hays and
31
+ Pietro Perona and
32
+ Deva Ramanan and
33
+ Piotr Doll{\'{a}}r and
34
+ C. Lawrence Zitnick},
35
+ title = {Microsoft {COCO:} Common Objects in Context},
36
+ journal = {CoRR},
37
+ volume = {abs/1405.0312},
38
+ year = {2014},
39
+ url = {http://arxiv.org/abs/1405.0312},
40
+ eprinttype = {arXiv},
41
+ eprint = {1405.0312},
42
+ timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
43
+ biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},
44
+ bibsource = {dblp computer science bibliography, https://dblp.org}
45
+ }
46
+ """
47
+
48
+ _DESCRIPTION = """
49
+ MS COCO is a large-scale object detection, segmentation, and captioning dataset.
50
+ COCO has several features: Object segmentation, Recognition in context, Superpixel stuff segmentation, 330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories, 5 captions per image, 250,000 people with keypoints.
51
+ """
52
+
53
+ _HOMEPAGE = "https://cocodataset.org/#home"
54
+
55
+ _LICENSE = "CC BY 4.0"
56
+
57
+
58
+ _IMAGES_URLS = {
59
+ "train": "https://huggingface.co/datasets/nyanko7/coco-hosted/resolve/main/train2014.zip",
60
+ "validation": "hhttps://huggingface.co/datasets/nyanko7/coco-hosted/resolve/main/val2014.zip",
61
+ }
62
+
63
+ _KARPATHY_FILES_URL = "https://huggingface.co/datasets/nyanko7/coco-hosted/resolve/main/caption_datasets.zip"
64
+
65
+ _SPLIT_MAP = {"train": "train2014", "validation": "val2014"}
66
+
def _caption_schema():
    """Schema of a single Karpathy caption entry."""
    return {
        "tokens": [datasets.Value("string")],
        "raw": datasets.Value("string"),
        "imgid": datasets.Value("int32"),
        "sentid": datasets.Value("int32"),
    }


def _base_columns():
    """Columns shared by both configurations, in their fixed order."""
    return {
        "image": datasets.Image(),
        "filepath": datasets.Value("string"),
        "sentids": [datasets.Value("int32")],
        "filename": datasets.Value("string"),
        "imgid": datasets.Value("int32"),
        "split": datasets.Value("string"),
    }


# "2014" config: one example per caption, the caption nested under "sentences".
_FEATURES = datasets.Features(
    {
        **_base_columns(),
        "sentences": _caption_schema(),
        "cocoid": datasets.Value("int32"),
    }
)

# "2014_captions" config: one example per image, with all of its captions
# gathered into parallel lists.
_FEATURES_CAPTIONS = datasets.Features(
    {
        **_base_columns(),
        "sentences_tokens": [[datasets.Value("string")]],
        "sentences_raw": [datasets.Value("string")],
        "sentences_sentid": [datasets.Value("int32")],
        "cocoid": datasets.Value("int32"),
    }
)
class COCO(datasets.GeneratorBasedBuilder):
    """MS COCO 2014 images paired with Karpathy caption annotations and splits.

    Two configurations:
      * ``2014``          -- one example per caption (the image is repeated).
      * ``2014_captions`` -- one example per image, with all of its captions
        gathered into parallel lists.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="2014", version=VERSION, description="2014 version of COCO with Karpathy annotations and splits"
        ),
        datasets.BuilderConfig(
            name="2014_captions",
            version=VERSION,
            description="Same as 2014 but with all captions of one image gathered in a single example",
        ),
    ]

    DEFAULT_CONFIG_NAME = "2014"

    # Karpathy split labels accepted for each dataset split key.  "restval"
    # images are folded into the training split, as is conventional.
    _ACCEPTED_SPLITS = {
        "train": ("train", "restval"),
        "validation": ("val",),
        "test": ("test",),
    }

    def _info(self):
        """Dataset metadata; the feature schema depends on the selected config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES if self.config.name == "2014" else _FEATURES_CAPTIONS,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download annotations and images, then declare the three splits.

        The same annotation file and image folders are handed to every split;
        filtering happens per-image in the generators.
        """
        annotation_file = os.path.join(dl_manager.download_and_extract(_KARPATHY_FILES_URL), "dataset_coco.json")
        image_folders = {k: Path(v) for k, v in dl_manager.download_and_extract(_IMAGES_URLS).items()}

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "annotation_file": annotation_file,
                    "image_folders": image_folders,
                    "split_key": split_key,
                },
            )
            for split_name, split_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, annotation_file, image_folders, split_key):
        """Dispatch to the per-config example generator."""
        if self.config.name == "2014_captions":
            return self._generate_examples_2014_captions(annotation_file, image_folders, split_key)
        elif self.config.name == "2014":
            return self._generate_examples_2014(annotation_file, image_folders, split_key)

    def _keep_image(self, image_metadata, split_key):
        """Return True if *image_metadata* belongs to the requested split."""
        accepted = self._ACCEPTED_SPLITS.get(split_key)
        # An unknown split key keeps every image, matching the original
        # fall-through behavior of the if/elif chain this replaces.
        return accepted is None or image_metadata["split"] in accepted

    @staticmethod
    def _image_path(image_metadata, image_folders):
        """Path to the image file.

        Files named ``*val2014*`` live in the validation archive regardless of
        which Karpathy split the image was assigned to.
        """
        folder_key = "validation" if "val2014" in image_metadata["filename"] else "train"
        return image_folders[folder_key] / _SPLIT_MAP[folder_key] / image_metadata["filename"]

    def _generate_examples_2014_captions(self, annotation_file, image_folders, split_key):
        """Yield ``(imgid, record)`` pairs: one record per image with all captions."""
        with open(annotation_file, "r", encoding="utf-8") as fi:
            annotations = json.load(fi)

        for image_metadata in annotations["images"]:
            if not self._keep_image(image_metadata, split_key):
                continue

            image_path = self._image_path(image_metadata, image_folders)
            sentences = image_metadata["sentences"]
            record = {
                "image": str(image_path.absolute()),
                "filepath": image_metadata["filename"],
                "sentids": image_metadata["sentids"],
                "filename": image_metadata["filename"],
                "imgid": image_metadata["imgid"],
                "split": image_metadata["split"],
                "cocoid": image_metadata["cocoid"],
                "sentences_tokens": [caption["tokens"] for caption in sentences],
                "sentences_raw": [caption["raw"] for caption in sentences],
                "sentences_sentid": [caption["sentid"] for caption in sentences],
            }

            yield record["imgid"], record

    def _generate_examples_2014(self, annotation_file, image_folders, split_key):
        """Yield ``(counter, record)`` pairs: one record per (image, caption)."""
        counter = 0
        with open(annotation_file, "r", encoding="utf-8") as fi:
            annotations = json.load(fi)

        for image_metadata in annotations["images"]:
            if not self._keep_image(image_metadata, split_key):
                continue

            image_path = self._image_path(image_metadata, image_folders)
            for caption in image_metadata["sentences"]:
                yield counter, {
                    "image": str(image_path.absolute()),
                    "filepath": image_metadata["filename"],
                    "sentids": image_metadata["sentids"],
                    "filename": image_metadata["filename"],
                    "imgid": image_metadata["imgid"],
                    "split": image_metadata["split"],
                    "sentences": {
                        "tokens": caption["tokens"],
                        "raw": caption["raw"],
                        "imgid": caption["imgid"],
                        "sentid": caption["sentid"],
                    },
                    "cocoid": image_metadata["cocoid"],
                }
                counter += 1