"""This dataset contains interior images created using DALL-E. |
|
The dataset contains 512x512 images split into 5 classes: |
|
* bathroom: 1000 images |
|
* bedroom: 1000 images |
|
* dining_room: 1000 images |
|
* kitchen: 1000 images |
|
* living_room: 1000 images |
|
""" |
|
|
|
from pathlib import Path
from typing import Iterator, List

import datasets
from datasets.download.download_manager import DownloadManager
from datasets.tasks import ImageClassification
|
|
|
_ALLOWED_IMG_EXT = {".png", ".jpg"}  # file extensions accepted by _generate_examples
|
|
|
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {Computer Generated interior images},
  author = {Padilla, Rafael},
  year   = {2023}
}
"""
|
|
|
_DESCRIPTION = """\
This dataset contains computer-generated (CG) interior images of houses in 5 classes, with \
1000 images per class.
"""
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/rafaelpadilla/interior-cgi/" |
|
|
|
_LICENSE = "" |
|
|
|
_URLS = {
    "test": "https://huggingface.co/datasets/rafaelpadilla/interior-cgi/resolve/main/data/test.zip",
    "train": "https://huggingface.co/datasets/rafaelpadilla/interior-cgi/resolve/main/data/train.zip",
}
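
# download_and_extract() mirrors this dict: it returns {"test": <local path>,
# "train": <local path>}, each pointing at the extracted archive contents.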
|
|
|
_NAMES = ["bathroom", "bedroom", "dining_room", "kitchen", "living_room"] |
|
|
|
|
|
class CGInteriorDataset(datasets.GeneratorBasedBuilder):
    """CGInterior: Computer Generated Interior images dataset."""

    VERSION = datasets.Version("1.1.0")
|
|
|
    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata and features.

        Returns:
            DatasetInfo: Metadata and features of the dataset.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label_id": datasets.features.ClassLabel(names=_NAMES),
                    "label_name": datasets.Value("string"),
                }
            ),
            supervised_keys=("image", "label_id"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                ImageClassification(image_column="image", label_column="label_id")
            ],
        )
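
    # Note (illustrative): "label_id" is a ClassLabel feature, so the string
    # class names yielded by _generate_examples are encoded to integer ids
    # automatically, e.g.:
    #
    #   datasets.features.ClassLabel(names=_NAMES).str2int("kitchen")  # -> 3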
|
|
|
    def _split_generators(
        self, dl_manager: DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Provides the split information and downloads the data.

        Args:
            dl_manager (DownloadManager): The DownloadManager to use for downloading and extracting data.

        Returns:
            List[SplitGenerator]: List of SplitGenerator objects representing the data splits.
        """
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files(data_files["train"]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files(data_files["test"]),
                },
            ),
        ]
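
    # Layout assumption (implied by how labels are derived in
    # _generate_examples): each extracted archive holds one sub-directory per
    # class, e.g. train/kitchen/0001.png, and dl_manager.iter_files() walks
    # those directories recursively, yielding one file path at a time.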
|
|
|
    def _generate_examples(self, files: List[str]) -> Iterator:
        """Generates examples for the dataset.

        Args:
            files (List[str]): List of image paths.

        Yields:
            Tuple[int, Dict]: The example index and a dict with the image path,
            the class label id, and the class label name.
        """
        for idx, img_path in enumerate(files):
            path = Path(img_path)
            # Lowercase the suffix so that e.g. ".PNG" or ".JPG" files are not skipped.
            if path.suffix.lower() in _ALLOWED_IMG_EXT:
                # The class name is the name of the directory holding the image;
                # the ClassLabel feature encodes it to an integer for "label_id".
                yield idx, {
                    "image": img_path,
                    "label_id": path.parent.name,
                    "label_name": path.parent.name,
                }
|
|