"""Dataset class for Food-101 dataset.""" |

import json

import datasets
from datasets.tasks import ImageClassification

|
_HOMEPAGE = "https://huggingface.co/datasets/emanuelebezzecchi/trailerShotScale" |

_DESCRIPTION = (
    "Shot scale has five categories: "
    "0) extreme close-up shot (ECS) shows even smaller parts, such as the image of an eye or a mouth; "
    "1) close-up shot (CS) concentrates on a relatively small object, showing the face or the hand of a person; "
    "2) medium shot (MS) contains a figure from the knees or waist up; "
    "3) full shot (FS) barely includes the human body in full; "
    "4) long shot (LS) is taken from a long distance, sometimes as far as a quarter of a mile away."
)

_CITATION = """\
@inproceedings{rao2020unified,
  title={A Unified Framework for Shot Type Classification Based on Subject Centric Lens},
  author={Rao, Anyi and Wang, Jiaze and Xu, Linning and Jiang, Xuekun and Huang, Qingqiu and Zhou, Bolei and Lin, Dahua},
  booktitle={The European Conference on Computer Vision (ECCV)},
  year={2020}
}
"""

_LICENSE = """\
LICENSE AGREEMENT
=================
"""

_NAMES = ["ECS", "CS", "MS", "FS", "LS"]

_JSON_DIR = "https://huggingface.co/datasets/emanuelebezzecchi/trailerShotScale/resolve/main/data.json"
_URL = "https://huggingface.co/datasets/emanuelebezzecchi/trailerShotScale/resolve/main/images.tar.gz"


class trailerShotScale(datasets.GeneratorBasedBuilder):
    """trailerShotScale: roughly 10% of the original data, with 5 images per folder."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        # Fetch the label file and the image archive through the download manager
        # so both are cached locally instead of being downloaded at import time.
        with open(dl_manager.download(_JSON_DIR), encoding="utf-8") as f:
            labels = json.load(f)["labels"]
        images = dl_manager.iter_archive(dl_manager.download(_URL))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": images, "labels": labels},
            )
        ]

    def _generate_examples(self, images, labels):
        """Yield (index, example) pairs with an image and its shot-scale label."""
        # Labels in data.json are assumed to follow the same order as the files
        # inside the archive, matching the positional indexing used originally.
        for idx, (filepath, image) in enumerate(images):
            yield idx, {
                "image": {"path": filepath, "bytes": image.read()},
                "label": labels[idx],
            }
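

# A minimal usage sketch, assuming this script is hosted in the
# emanuelebezzecchi/trailerShotScale repository referenced by _HOMEPAGE and
# _URL above. Recent versions of `datasets` may additionally require
# `trust_remote_code=True` when loading script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("emanuelebezzecchi/trailerShotScale", split="train")
    print(ds.features["label"].names)  # expected: ['ECS', 'CS', 'MS', 'FS', 'LS']
    print(ds[0]["label"])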