import random

import datasets
import pandas as pd

_CITATION = """\
@misc{black2023vader,
      title={VADER: Video Alignment Differencing and Retrieval},
      author={Alexander Black and Simon Jenni and Tu Bui and Md. Mehrab Tanjim and Stefano Petrangeli and Ritwik Sinha and Viswanathan Swaminathan and John Collomosse},
      year={2023},
      eprint={2303.13193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
"""

_DESCRIPTION = """\
ANAKIN is a dataset of mANipulated videos and mAsK annotatIoNs.
"""

_HOMEPAGE = "https://github.com/AlexBlck/vader"

_LICENSE = "cc-by-4.0"

_METADATA_URL = "https://huggingface.co/datasets/AlexBlck/ANAKIN/raw/main/metadata.csv"

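# Maps each builder config name to the sub-folders of the Hub repo that are
# downloaded for that config; "masks" is always included, while "full" is only
# present for the configs that expose the full-length source video.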
_FOLDERS = {
    "all": ("full", "trimmed", "edited", "masks"),
    "no-full": ("trimmed", "edited", "masks"),
    "has-masks": ("trimmed", "edited", "masks"),
    "full-masks": ("full", "trimmed", "edited", "masks"),
}


class Anakin(datasets.GeneratorBasedBuilder):
    """ANAKIN is a dataset of mANipulated videos and mAsK annotatIoNs."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="Full video, trimmed video, edited video, masks (if they exist), and edit description",
        ),
        datasets.BuilderConfig(
            name="no-full",
            version=VERSION,
            description="Trimmed video, edited video, masks (if they exist), and edit description",
        ),
        datasets.BuilderConfig(
            name="has-masks",
            version=VERSION,
            description="Only samples that have masks, without the full-length video.",
        ),
        datasets.BuilderConfig(
            name="full-masks",
            version=VERSION,
            description="Only samples that have masks, with the full-length video.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

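    # The feature schema depends on the config: configs that include the
    # full-length video also expose "start-time"/"end-time", while the others
    # omit the "full" path and the time fields.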
    def _info(self):
        if self.config.name in ("all", "full-masks"):
            features = datasets.Features(
                {
                    "full": datasets.Value("string"),
                    "trimmed": datasets.Value("string"),
                    "edited": datasets.Value("string"),
                    "masks": datasets.Sequence(datasets.Image()),
                    "task": datasets.Value("string"),
                    "start-time": datasets.Value("int32"),
                    "end-time": datasets.Value("int32"),
                    "manipulation-type": datasets.Value("string"),
                    "editor-id": datasets.Value("string"),
                }
            )
        else:  # "no-full" and "has-masks": no full video, no trim times
            features = datasets.Features(
                {
                    "trimmed": datasets.Value("string"),
                    "edited": datasets.Value("string"),
                    "masks": datasets.Sequence(datasets.Image()),
                    "task": datasets.Value("string"),
                    "manipulation-type": datasets.Value("string"),
                    "editor-id": datasets.Value("string"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

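    # Downloads the metadata CSV and the per-split video files, and produces a
    # deterministic (seeded) train/validation/test split of the video ids.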
    def _split_generators(self, dl_manager):
        metadata_path = dl_manager.download(_METADATA_URL)
        folders = _FOLDERS[self.config.name]

        random.seed(47)
        root_url = "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/"
        df = pd.read_csv(metadata_path)
        if "full" in folders:
            df = df[df["full-available"] == True]
        if "-masks" in self.config.name:
            df = df[df["has-masks"] == True]

        ids = df["video-id"].to_list()
        random.shuffle(ids)

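        # Deterministic 70 / 10 / 20 split over the shuffled video ids.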
        train_end = int(len(ids) * 0.7)
        val_end = int(len(ids) * 0.8)
        split_ids = {
            datasets.Split.TRAIN: ids[:train_end],
            datasets.Split.VALIDATION: ids[train_end:val_end],
            datasets.Split.TEST: ids[val_end:],
        }

        data_dir = {}
        mask_dir = {}

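        # For every split, download the requested video folders for each id
        # and, for ids that have masks, download the mask zip archive and wrap
        # it with iter_archive so the frames can be read lazily.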
        for split in [
            datasets.Split.TRAIN,
            datasets.Split.VALIDATION,
            datasets.Split.TEST,
        ]:
            data_urls = [
                {
                    folder: root_url + f"{folder}/{idx}.mp4"
                    for folder in folders
                    if folder != "masks"
                }
                for idx in split_ids[split]
            ]
            data_dir[split] = dl_manager.download(data_urls)
            mask_dir[split] = {
                idx: dl_manager.iter_archive(
                    dl_manager.download(root_url + f"masks/{idx}.zip")
                )
                for idx in split_ids[split]
                if df[df["video-id"] == idx]["has-masks"].values[0]
            }

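        # One SplitGenerator per split; start/end times are only returned for
        # configs that include the full-length video ("full" in folders).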
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "files": data_dir[split],
                    "masks": mask_dir[split],
                    "df": df,
                    "ids": split_ids[split],
                    "return_time": "full" in folders,
                },
            )
            for split in [
                datasets.Split.TRAIN,
                datasets.Split.VALIDATION,
                datasets.Split.TEST,
            ]
        ]

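    # Yields one example per video id, combining the downloaded file paths,
    # the mask frames (if any), and the metadata row for that id.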
    def _generate_examples(self, files, masks, df, ids, return_time):
        for key, (idx, sample) in enumerate(zip(ids, files)):
            entry = df[df["video-id"] == idx]
            if idx in masks:
                sample["masks"] = [
                    {"path": p, "bytes": im.read()} for p, im in masks[idx]
                ]
            else:
                sample["masks"] = None
            sample["task"] = entry["task"].values[0]
            sample["manipulation-type"] = entry["manipulation-type"].values[0]
            sample["editor-id"] = entry["editor-id"].values[0]
            if return_time:
                sample["start-time"] = entry["start-time"].values[0]
                sample["end-time"] = entry["end-time"].values[0]
            yield key, sample
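
# Illustrative usage (a sketch, not part of the loading script): assuming this
# file is hosted as the loading script of the "AlexBlck/ANAKIN" Hub repo, the
# dataset can be loaded with the `datasets` library; recent versions may also
# require passing `trust_remote_code=True` for script-based datasets.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("AlexBlck/ANAKIN", "has-masks")
#   sample = ds["train"][0]
#   print(sample["task"], sample["manipulation-type"])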