Update dataset loading script on the Hub
Browse files- Boat_dataset.py +58 -24
Boat_dataset.py
CHANGED
@@ -38,67 +38,101 @@ _URLS = {
|
|
38 |
class BoatDataset(datasets.GeneratorBasedBuilder):
|
39 |
|
40 |
VERSION = datasets.Version("1.1.0")
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
def _info(self):
|
43 |
-
features=datasets.Features({
|
44 |
-
'image_id': datasets.Value('int32'),
|
45 |
-
'image_path': datasets.Value('string'),
|
46 |
-
'width': datasets.Value('int32'),
|
47 |
-
'height': datasets.Value('int32'),
|
48 |
-
'objects': datasets.Features({
|
49 |
-
'id': datasets.Sequence(datasets.Value('int32')),
|
50 |
-
'area': datasets.Sequence(datasets.Value('float32')),
|
51 |
-
'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)), # [x, y, width, height]
|
52 |
-
'category': datasets.Sequence(datasets.Value('int32'))
|
53 |
-
}),
|
54 |
-
})
|
55 |
-
|
56 |
return datasets.DatasetInfo(
|
57 |
description=_DESCRIPTION,
|
58 |
-
features=
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
homepage=_HOMEPAGE,
|
60 |
license=_LICENSE,
|
61 |
citation=_CITATION,
|
62 |
)
|
63 |
|
64 |
def _split_generators(self, dl_manager):
|
|
|
65 |
downloaded_files = dl_manager.download_and_extract(_URLS)
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
return [
|
68 |
datasets.SplitGenerator(
|
69 |
name=datasets.Split.TRAIN,
|
70 |
gen_kwargs={
|
71 |
-
|
72 |
-
|
|
|
|
|
73 |
}
|
74 |
),
|
75 |
datasets.SplitGenerator(
|
76 |
name=datasets.Split.VALIDATION,
|
77 |
gen_kwargs={
|
78 |
-
|
79 |
-
|
|
|
|
|
80 |
}
|
81 |
),
|
82 |
datasets.SplitGenerator(
|
83 |
name=datasets.Split.TEST,
|
84 |
gen_kwargs={
|
85 |
-
|
86 |
-
|
|
|
|
|
87 |
}
|
88 |
-
)
|
89 |
]
|
90 |
|
91 |
-
def _generate_examples(self, annotations_file, split):
|
|
|
|
|
|
|
|
|
|
|
|
|
92 |
with open(annotations_file, encoding="utf-8") as f:
|
93 |
for key, row in enumerate(f):
|
94 |
try:
|
95 |
data = json.loads(row.strip())
|
|
|
|
|
|
|
96 |
yield key, {
|
97 |
"image_id": data["image_id"],
|
98 |
-
"image_path":
|
99 |
"width": data["width"],
|
100 |
"height": data["height"],
|
101 |
-
"objects":
|
|
|
|
|
|
|
|
|
|
|
102 |
}
|
103 |
except json.JSONDecodeError:
|
104 |
print(f"Skipping invalid JSON at line {key + 1}: {row}")
|
|
|
38 |
class BoatDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for detecting boats in aerial images.

    Streams images out of a tar archive (without extracting it) and pairs
    them with JSON-lines annotation files, one per split.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Boat_dataset",
            version=VERSION,
            description="Dataset for detecting boats in aerial images.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "Boat_dataset"  # Provide a default configuration

    def _info(self):
        # The feature schema must match the examples yielded by
        # _generate_examples exactly, or generation fails at cast time.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image_id': datasets.Value('int32'),
                'image_path': datasets.Value('string'),
                'width': datasets.Value('int32'),
                'height': datasets.Value('int32'),
                'objects': datasets.Features({
                    'id': datasets.Sequence(datasets.Value('int32')),
                    'area': datasets.Sequence(datasets.Value('float32')),
                    # [x, y, width, height]
                    'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)),
                    # Human-readable class names: _generate_examples maps the
                    # integer ids in the annotations through the classes file,
                    # so this must be a string feature, not int32.
                    'category': datasets.Sequence(datasets.Value('string')),
                }),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all files and define the train/validation/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)

        # Paths to files and archives
        image_archive_path = downloaded_files["images"]
        classes_file_path = downloaded_files["classes"]
        annotations_paths = downloaded_files["anno"]

        # Load class labels (one name per line); index == category id.
        with open(classes_file_path, 'r', encoding="utf-8") as file:
            classes = [line.strip() for line in file.readlines()]

        # NOTE: gen_kwargs keys must match the _generate_examples signature —
        # the datasets library calls _generate_examples(**gen_kwargs) and does
        # NOT pass dl_manager. A fresh iter_archive iterator is created per
        # split because each one can only be consumed once.
        split_specs = [
            (datasets.Split.TRAIN, "train", annotations_paths["train"]),
            (datasets.Split.VALIDATION, "val", annotations_paths["val"]),
            # The test split deliberately uses the "val_real" tag from the
            # original script — presumably real (non-synthetic) validation
            # imagery; confirm against the annotation files.
            (datasets.Split.TEST, "val_real", annotations_paths["test"]),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "images": dl_manager.iter_archive(image_archive_path),
                    "annotations_file": annotations_file,
                    "classes": classes,
                    "split": split_tag,
                },
            )
            for split_name, split_tag, annotations_file in split_specs
        ]

    def _generate_examples(self, images, annotations_file, classes, split):
        """Yield (key, example) pairs for one split.

        Args:
            images: iterator of (path, file-object) pairs produced by
                dl_manager.iter_archive over the image tar archive.
            annotations_file: path to a JSON-lines file; each line holds one
                image record with `file_name`, `image_id`, `width`, `height`
                and an `objects` dict.
            classes: list mapping integer category ids to class names.
            split: split tag (unused here; kept for signature parity with
                gen_kwargs).
        """
        # Record which file names exist in the archive. iter_archive's file
        # objects are only valid while iterating, so keep names only —
        # stashing the handles for later reads would fail.
        available_files = {os.path.basename(path) for path, _ in images}

        # Process annotations
        with open(annotations_file, encoding="utf-8") as f:
            for key, row in enumerate(f):
                # Keep the try body minimal: only the parse can raise
                # JSONDecodeError; schema errors should surface loudly.
                try:
                    data = json.loads(row.strip())
                except json.JSONDecodeError:
                    print(f"Skipping invalid JSON at line {key + 1}: {row}")
                    continue
                if data["file_name"] not in available_files:
                    continue  # Skip annotations whose image is not in the archive
                yield key, {
                    "image_id": data["image_id"],
                    # A string path, matching the declared 'image_path' feature.
                    "image_path": data["file_name"],
                    "width": data["width"],
                    "height": data["height"],
                    "objects": {
                        "id": data["objects"]["id"],
                        "area": data["objects"]["area"],
                        "bbox": data["objects"]["bbox"],
                        # Map integer category ids to class names.
                        "category": [classes[idx] for idx in data["objects"]["category"]],
                    },
                }