text (string, lengths 1–1.02k) | class_index (int64, 0–271) | source (76 unique values)
---|---|---|
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
) | 112 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
module_path, hash = _PACKAGED_DATASETS_MODULES[self.name]
builder_kwargs = {
"data_files": data_files,
"dataset_name": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs) | 112 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from data files of a dataset repository.
The dataset builder module to use is inferred from the data files extensions.
"""
def __init__(
self,
name: str,
commit_hash: str,
data_dir: Optional[str] = None,
data_files: Optional[Union[str, List, Dict]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
use_exported_dataset_infos: bool = False,
):
self.name = name
self.commit_hash = commit_hash
self.data_files = data_files
self.data_dir = data_dir
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.use_exported_dataset_infos = use_exported_dataset_infos
increase_load_count(name) | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def get_module(self) -> DatasetModule:
# Get the Dataset Card and fix the revision in case there are new commits in the meantime
api = HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
try:
dataset_readme_path = api.hf_hub_download(
repo_id=self.name,
filename=config.REPOCARD_FILENAME,
repo_type="dataset",
revision=self.commit_hash,
proxies=self.download_config.proxies,
)
dataset_card_data = DatasetCard.load(dataset_readme_path).data
except EntryNotFoundError:
dataset_card_data = DatasetCardData()
download_config = self.download_config.copy()
if download_config.download_desc is None: | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
download_config.download_desc = "Downloading standalone yaml"
try:
standalone_yaml_path = cached_path(
hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(standalone_yaml_path, "r", encoding="utf-8") as f:
standalone_yaml_data = yaml.safe_load(f.read())
if standalone_yaml_data:
_dataset_card_data_dict = dataset_card_data.to_dict()
_dataset_card_data_dict.update(standalone_yaml_data)
dataset_card_data = DatasetCardData(**_dataset_card_data_dict)
except FileNotFoundError:
pass
base_path = f"hf://datasets/{self.name}@{self.commit_hash}/{self.data_dir or ''}".rstrip("/")
metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
if config.USE_PARQUET_EXPORT and self.use_exported_dataset_infos:
try:
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
except _dataset_viewer.DatasetViewerError:
exported_dataset_infos = None
else:
exported_dataset_infos = None
if exported_dataset_infos:
exported_dataset_infos.update(dataset_infos)
dataset_infos = exported_dataset_infos
# we need a set of data files to find which dataset builder to use
# because the module name is inferred from the file extensions
if self.data_files is not None:
patterns = sanitize_patterns(self.data_files)
elif metadata_configs and not self.data_dir and "data_files" in next(iter(metadata_configs.values())):
patterns = sanitize_patterns(next(iter(metadata_configs.values()))["data_files"])
else:
patterns = get_data_patterns(base_path, download_config=self.download_config)
data_files = DataFilesDict.from_patterns(
patterns,
base_path=base_path,
allowed_extensions=ALL_ALLOWED_EXTENSIONS,
download_config=self.download_config,
)
module_name, default_builder_kwargs = infer_module_for_data_files(
data_files=data_files,
path=self.name,
download_config=self.download_config,
)
data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name])
# Collect metadata files if the module supports them | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
supports_metadata = module_name in _MODULE_SUPPORTS_METADATA
if self.data_files is None and supports_metadata:
try:
metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config)
except FileNotFoundError:
metadata_patterns = None
if metadata_patterns is not None:
metadata_data_files_list = DataFilesList.from_patterns(
metadata_patterns, download_config=self.download_config, base_path=base_path
)
if metadata_data_files_list:
data_files = DataFilesDict(
{
split: data_files_list + metadata_data_files_list
for split, data_files_list in data_files.items()
}
) | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
module_path, _ = _PACKAGED_DATASETS_MODULES[module_name]
if metadata_configs:
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
base_path=base_path,
supports_metadata=supports_metadata,
default_builder_kwargs=default_builder_kwargs,
download_config=self.download_config,
)
else:
builder_configs: List[BuilderConfig] = [
import_main_class(module_path).BUILDER_CONFIG_CLASS(
data_files=data_files,
**default_builder_kwargs,
)
]
default_config_name = None
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
} | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
if self.data_dir:
builder_kwargs["data_files"] = data_files
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
# this file is deprecated and was created automatically in old versions of push_to_hub
dataset_infos_path = cached_path(
hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash),
download_config=download_config,
)
with open(dataset_infos_path, encoding="utf-8") as f:
legacy_dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(dataset_info_dict)
for config_name, dataset_info_dict in json.load(f).items()
}
)
if len(legacy_dataset_infos) == 1: | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
# old config e.g. named "username--dataset_name"
legacy_config_name = next(iter(legacy_dataset_infos))
legacy_dataset_infos["default"] = legacy_dataset_infos.pop(legacy_config_name)
legacy_dataset_infos.update(dataset_infos)
dataset_infos = legacy_dataset_infos
except FileNotFoundError:
pass
if default_config_name is None and len(dataset_infos) == 1:
default_config_name = next(iter(dataset_infos)) | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
) | 113 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
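The central step in the factory above is inferring which packaged builder (csv, json, parquet, ...) to use from the extensions of the resolved data files. Below is a minimal standalone sketch of that idea, using a simplified, hypothetical extension-to-module mapping (the real logic lives in `infer_module_for_data_files` and `_MODULE_TO_EXTENSIONS`):

```py
from collections import Counter
from pathlib import Path

# Simplified, hypothetical mapping; the real one covers many more formats.
EXTENSION_TO_MODULE = {".csv": "csv", ".json": "json", ".jsonl": "json", ".parquet": "parquet", ".txt": "text"}

def infer_module(data_files):
    """Pick the packaged module whose extension is most common among the data files."""
    counts = Counter(EXTENSION_TO_MODULE.get(Path(f).suffix.lower()) for f in data_files)
    counts.pop(None, None)  # ignore unknown extensions
    if not counts:
        raise ValueError("Couldn't infer a dataset builder from the file extensions")
    return counts.most_common(1)[0][0]

print(infer_module(["train-00000-of-00001.parquet", "test-00000-of-00001.parquet"]))  # -> "parquet"
```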
class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory):
"""
Get the module of a dataset loaded from the Parquet files of a dataset repository's Parquet export.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
increase_load_count(name) | 114 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def get_module(self) -> DatasetModule:
exported_parquet_files = _dataset_viewer.get_exported_parquet_files(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(
dataset=self.name, commit_hash=self.commit_hash, token=self.download_config.token
)
dataset_infos = DatasetInfosDict(
{
config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name])
for config_name in exported_dataset_infos
}
)
parquet_commit_hash = (
HfApi(
endpoint=config.HF_ENDPOINT,
token=self.download_config.token,
library_name="datasets",
library_version=__version__,
user_agent=get_datasets_user_agent(self.download_config.user_agent),
)
.dataset_info(
self.name, | 114 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
revision="refs/convert/parquet",
token=self.download_config.token,
timeout=100.0,
)
.sha
) # fix the revision in case there are new commits in the meantime
metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(
parquet_commit_hash=parquet_commit_hash,
exported_parquet_files=exported_parquet_files,
dataset_infos=dataset_infos,
)
module_path, _ = _PACKAGED_DATASETS_MODULES["parquet"]
builder_configs, default_config_name = create_builder_configs_from_metadata_configs(
module_path,
metadata_configs,
supports_metadata=False,
download_config=self.download_config,
)
builder_kwargs = {
"repo_id": self.name,
"dataset_name": camelcase_to_snakecase(Path(self.name).name),
} | 114 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
return DatasetModule(
module_path,
self.commit_hash,
builder_kwargs,
dataset_infos=dataset_infos,
builder_configs_parameters=BuilderConfigsParameters(
metadata_configs=metadata_configs,
builder_configs=builder_configs,
default_config_name=default_config_name,
),
) | 114 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
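The `refs/convert/parquet` revision used above is the Hub's automatically maintained Parquet export of a dataset. A small exploratory sketch, assuming the `huggingface_hub` client and a public dataset such as `rotten_tomatoes`, to see what that export contains:

```py
from huggingface_hub import HfApi

api = HfApi()
# Pin the export to a specific commit, as get_module() does above.
parquet_commit_hash = api.dataset_info("rotten_tomatoes", revision="refs/convert/parquet").sha
files = api.list_repo_files("rotten_tomatoes", repo_type="dataset", revision=parquet_commit_hash)
print([f for f in files if f.endswith(".parquet")])
```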
class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory):
"""
Get the module of a dataset from a dataset repository.
The builder module comes from the dataset script inside the repository.
"""
def __init__(
self,
name: str,
commit_hash: str,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[Union[DownloadMode, str]] = None,
dynamic_modules_path: Optional[str] = None,
trust_remote_code: Optional[bool] = None,
):
self.name = name
self.commit_hash = commit_hash
self.download_config = download_config or DownloadConfig()
self.download_mode = download_mode
self.dynamic_modules_path = dynamic_modules_path
self.trust_remote_code = trust_remote_code
increase_load_count(name) | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def download_loading_script(self) -> str:
file_path = hf_dataset_url(self.name, self.name.split("/")[-1] + ".py", revision=self.commit_hash)
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading builder script"
return cached_path(file_path, download_config=download_config)
def download_dataset_infos_file(self) -> str:
dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.commit_hash)
# Download the dataset infos file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading metadata"
try:
return cached_path(
dataset_infos,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def download_dataset_readme_file(self) -> str:
readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.commit_hash)
# Download the dataset README file if available
download_config = self.download_config.copy()
if download_config.download_desc is None:
download_config.download_desc = "Downloading readme"
try:
return cached_path(
readme_url,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
return None | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def get_module(self) -> DatasetModule:
if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None:
warnings.warn(
f"The repository for {self.name} contains custom code which must be executed to correctly "
f"load the dataset. You can inspect the repository content at https://hf.co/datasets/{self.name}\n"
f"You can avoid this message in future by passing the argument `trust_remote_code=True`.\n"
f"Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.",
FutureWarning,
)
# get script and other files
local_path = self.download_loading_script()
dataset_infos_path = self.download_dataset_infos_file()
dataset_readme_path = self.download_dataset_readme_file()
imports = get_imports(local_path)
local_imports, library_imports = _download_additional_modules( | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
name=self.name,
base_path=hf_dataset_url(self.name, "", revision=self.commit_hash),
imports=imports,
download_config=self.download_config,
)
additional_files = []
if dataset_infos_path:
additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path))
if dataset_readme_path:
additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path))
# copy the script and the files in an importable directory
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
if not os.path.exists(importable_file_path): | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name)
if trust_remote_code:
_create_importable_file(
local_path=local_path,
local_imports=local_imports,
additional_files=additional_files,
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
download_mode=self.download_mode,
)
else:
raise ValueError(
f"Loading {self.name} requires you to execute the dataset script in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
) | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
_check_library_imports(name=self.name, library_imports=library_imports)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
# make sure the new module is noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"base_path": hf_dataset_url(self.name, "", revision=self.commit_hash).rstrip("/"),
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path) | 115 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
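This factory runs when a repository still ships a Python loading script. A minimal usage sketch, with `username/script_based_dataset` as a placeholder repo id:

```py
from datasets import load_dataset

# Without trust_remote_code=True, get_module() raises the ValueError shown above
# instead of executing the repository's loading script.
ds = load_dataset("username/script_based_dataset", trust_remote_code=True)
```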
class CachedDatasetModuleFactory(_DatasetModuleFactory):
"""
Get the module of a dataset that has been loaded once already and cached.
The script that is loaded from the cache is the most recent one with a matching name.
"""
def __init__(
self,
name: str,
cache_dir: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
):
self.name = name
self.cache_dir = cache_dir
self.dynamic_modules_path = dynamic_modules_path
assert self.name.count("/") <= 1 | 116 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
def get_module(self) -> DatasetModule:
dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules()
importable_directory_path = os.path.join(dynamic_modules_path, "datasets", self.name.replace("/", "--"))
hashes = (
[h for h in os.listdir(importable_directory_path) if len(h) == 64]
if os.path.isdir(importable_directory_path)
else None
)
if hashes:
# get most recent
def _get_modification_time(module_hash):
return (
(Path(importable_directory_path) / module_hash / (self.name.split("/")[-1] + ".py"))
.stat()
.st_mtime
) | 116 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
hash = sorted(hashes, key=_get_modification_time)[-1]
warning_msg = (
f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {self.name}"
)
if not config.HF_HUB_OFFLINE:
warning_msg += ", or remotely on the Hugging Face Hub."
logger.warning(warning_msg)
importable_file_path = _get_importable_file_path(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name,
)
module_path, hash = _load_importable_file(
dynamic_modules_path=dynamic_modules_path,
module_namespace="datasets",
subdirectory_name=hash,
name=self.name, | 116 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
)
# make sure the new module is noticed by the import system
importlib.invalidate_caches()
builder_kwargs = {
"repo_id": self.name,
}
return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path)
cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE))
namespace_and_dataset_name = self.name.split("/")
namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1])
cached_relative_path = "___".join(namespace_and_dataset_name)
cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path)
cached_directory_paths = [
cached_directory_path
for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", "*", "*"))
if os.path.isdir(cached_directory_path)
]
if cached_directory_paths: | 116 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
builder_kwargs = {
"repo_id": self.name,
"dataset_name": self.name.split("/")[-1],
}
warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub"
if config.HF_HUB_OFFLINE:
warning_msg += " (offline mode is enabled)."
logger.warning(warning_msg)
return DatasetModule(
"datasets.packaged_modules.cache.cache",
"auto",
{**builder_kwargs, "version": "auto"},
)
raise FileNotFoundError(f"Dataset {self.name} is not cached in {self.cache_dir}") | 116 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/load.py |
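CachedDatasetModuleFactory is the fallback used when the Hub is unreachable, for example in offline mode; loading then only succeeds if the dataset was prepared in an earlier session. A small sketch of that flow, assuming the dataset was downloaded before:

```py
import os
os.environ["HF_HUB_OFFLINE"] = "1"  # must be set before importing datasets

from datasets import load_dataset

# Reuses the cached copy and emits the "Using the latest cached version" warning above;
# if nothing is cached, the factory raises FileNotFoundError instead.
ds = load_dataset("rotten_tomatoes")
```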
class SupervisedKeysData:
input: str = ""
output: str = "" | 117 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class DownloadChecksumsEntryData:
key: str = ""
value: str = "" | 118 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class MissingCachedSizesConfigError(Exception):
"""The expected cached sizes of the download file are missing.""" | 119 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class NonMatchingCachedSizesError(Exception):
"""The prepared split doesn't have expected sizes.""" | 120 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class PostProcessedInfo:
features: Optional[Features] = None
resources_checksums: Optional[dict] = None
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
@classmethod
def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names}) | 121 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class DatasetInfo:
"""Information about a dataset.
`DatasetInfo` documents a dataset, including its name, version, and features.
See the constructor arguments and properties for a full list.
Not all fields are known on construction and may be updated later. | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
Attributes:
description (`str`):
A description of the dataset.
citation (`str`):
A BibTeX citation of the dataset.
homepage (`str`):
A URL to the official homepage for the dataset.
license (`str`):
The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
features ([`Features`], *optional*):
The features used to specify the dataset's column types.
post_processed (`PostProcessedInfo`, *optional*):
Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
supervised_keys (`SupervisedKeysData`, *optional*):
Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
builder_name (`str`, *optional*): | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
config_name (`str`, *optional*):
The name of the configuration derived from [`BuilderConfig`].
version (`str` or [`Version`], *optional*):
The version of the dataset.
splits (`dict`, *optional*):
The mapping between split name and metadata.
download_checksums (`dict`, *optional*):
The mapping between the URLs of the downloaded files and their checksum metadata.
download_size (`int`, *optional*):
The size of the files to download to generate the dataset, in bytes.
post_processing_size (`int`, *optional*):
Size of the dataset in bytes after post-processing, if any.
dataset_size (`int`, *optional*): | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
The combined size in bytes of the Arrow tables for all splits.
size_in_bytes (`int`, *optional*):
The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
**config_kwargs (additional keyword arguments):
Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
""" | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
# Set in the dataset scripts
description: str = dataclasses.field(default_factory=str)
citation: str = dataclasses.field(default_factory=str)
homepage: str = dataclasses.field(default_factory=str)
license: str = dataclasses.field(default_factory=str)
features: Optional[Features] = None
post_processed: Optional[PostProcessedInfo] = None
supervised_keys: Optional[SupervisedKeysData] = None
# Set later by the builder
builder_name: Optional[str] = None
dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
config_name: Optional[str] = None
version: Optional[Union[str, Version]] = None
# Set later by `download_and_prepare`
splits: Optional[dict] = None
download_checksums: Optional[dict] = None
download_size: Optional[int] = None
post_processing_size: Optional[int] = None
dataset_size: Optional[int] = None
size_in_bytes: Optional[int] = None | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
_INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
"config_name",
"download_size",
"dataset_size",
"features",
"splits",
] | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def __post_init__(self):
# Convert back to the correct classes when we reload from dict
if self.features is not None and not isinstance(self.features, Features):
self.features = Features.from_dict(self.features)
if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
if self.version is not None and not isinstance(self.version, Version):
if isinstance(self.version, str):
self.version = Version(self.version)
else:
self.version = Version.from_dict(self.version)
if self.splits is not None and not isinstance(self.splits, SplitDict):
self.splits = SplitDict.from_split_dict(self.splits)
if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
if isinstance(self.supervised_keys, (tuple, list)): | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
else:
self.supervised_keys = SupervisedKeysData(**self.supervised_keys) | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict] = None):
"""Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
Args:
dataset_info_dir (`str`):
Destination directory.
pretty_print (`bool`, defaults to `False`):
If `True`, the JSON will be pretty-printed with an indent level of 4.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example: | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="validation")
>>> ds.info.write_to_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
self._dump_info(f, pretty_print=pretty_print)
if self.license:
with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
self._dump_license(f)
def _dump_info(self, file, pretty_print=False):
"""Dump info in `file` file-like object open in bytes mode (to support remote files)"""
file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8")) | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def _dump_license(self, file):
"""Dump license in `file` file-like object open in bytes mode (to support remote files)"""
file.write(self.license.encode("utf-8"))
@classmethod
def from_merge(cls, dataset_infos: List["DatasetInfo"]):
dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
# if all dataset_infos are equal we don't need to merge. Just return the first.
return dataset_infos[0] | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
features = None
supervised_keys = None
return cls(
description=description,
citation=citation,
homepage=homepage,
license=license,
features=features,
supervised_keys=supervised_keys,
)
@classmethod
def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict] = None) -> "DatasetInfo":
"""Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
This function updates all the dynamically generated fields (num_examples,
hash, time of creation,...) of the [`DatasetInfo`]. | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
This will overwrite all previous metadata.
Args:
dataset_info_dir (`str`):
The directory containing the metadata file. This
should be the root directory of a specific dataset version.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.9.0"/>
Example: | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
```py
>>> from datasets import DatasetInfo
>>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
```
"""
fs: fsspec.AbstractFileSystem
fs, *_ = url_to_fs(dataset_info_dir, **(storage_options or {}))
logger.info(f"Loading Dataset info from {dataset_info_dir}")
if not dataset_info_dir:
raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
dataset_info_dict = json.load(f)
return cls.from_dict(dataset_info_dict)
@classmethod
def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names}) | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
self_dict = self.__dict__
self_dict.update(
**{
k: copy.deepcopy(v)
for k, v in other_dataset_info.__dict__.items()
if (v is not None or not ignore_none)
}
)
def copy(self) -> "DatasetInfo":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()}) | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def _to_yaml_dict(self) -> dict:
yaml_dict = {}
dataset_info_dict = asdict(self)
for key in dataset_info_dict:
if key in self._INCLUDED_INFO_IN_YAML:
value = getattr(self, key)
if hasattr(value, "_to_yaml_list"): # Features, SplitDict
yaml_dict[key] = value._to_yaml_list()
elif hasattr(value, "_to_yaml_string"): # Version
yaml_dict[key] = value._to_yaml_string()
else:
yaml_dict[key] = value
return yaml_dict | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
@classmethod
def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
yaml_data = copy.deepcopy(yaml_data)
if yaml_data.get("features") is not None:
yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
if yaml_data.get("splits") is not None:
yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in yaml_data.items() if k in field_names}) | 122 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class DatasetInfosDict(Dict[str, DatasetInfo]):
def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
total_dataset_infos = {}
dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
if not overwrite:
total_dataset_infos = self.from_directory(dataset_infos_dir)
total_dataset_infos.update(self)
if os.path.exists(dataset_infos_path):
# for backward compatibility, let's update the JSON file if it exists
with open(dataset_infos_path, "w", encoding="utf-8") as f:
dataset_infos_dict = {
config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
}
json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
# Dump the infos in the YAML part of the README.md file | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
if os.path.exists(dataset_readme_path):
dataset_card = DatasetCard.load(dataset_readme_path)
dataset_card_data = dataset_card.data
else:
dataset_card = None
dataset_card_data = DatasetCardData()
if total_dataset_infos:
total_dataset_infos.to_dataset_card_data(dataset_card_data)
dataset_card = (
DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
)
dataset_card.save(Path(dataset_readme_path)) | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
@classmethod
def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
# Load the info from the YAML part of README.md
if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
if "dataset_info" in dataset_card_data:
return cls.from_dataset_card_data(dataset_card_data)
if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
# this is just to have backward compatibility with dataset_infos.json files
with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
return cls(
{
config_name: DatasetInfo.from_dict(dataset_info_dict) | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
for config_name, dataset_info_dict in json.load(f).items()
}
)
else:
return cls() | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
@classmethod
def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
if isinstance(dataset_card_data["dataset_info"], list):
return cls(
{
dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
dataset_info_yaml_dict
)
for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
}
)
else:
dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
return cls({dataset_info.config_name: dataset_info})
else:
return cls() | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
if self:
# first get existing metadata info
if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
dataset_metadata_infos = {
dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
}
elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
dataset_metadata_infos = {
config_metadata["config_name"]: config_metadata
for config_metadata in dataset_card_data["dataset_info"]
}
else:
dataset_metadata_infos = {}
# update/rewrite existing metadata info with the one to dump
total_dataset_infos = {
**dataset_metadata_infos, | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
**{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
}
# the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
for config_name, dset_info_yaml_dict in total_dataset_infos.items():
dset_info_yaml_dict["config_name"] = config_name
if len(total_dataset_infos) == 1:
# use a struct instead of a list of configurations, since there's only one
dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
config_name = dataset_card_data["dataset_info"].pop("config_name", None)
if config_name != "default":
# if config_name is not "default" preserve it and put at the first position
dataset_card_data["dataset_info"] = {
"config_name": config_name,
**dataset_card_data["dataset_info"],
} | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
else:
dataset_card_data["dataset_info"] = []
for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
# add the config_name field in first position
dataset_info_yaml_dict.pop("config_name", None)
dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
dataset_card_data["dataset_info"].append(dataset_info_yaml_dict) | 123 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/info.py |
class InvalidKeyError(Exception):
"""Raises an error when given key is of invalid datatype."""
def __init__(self, hash_data):
self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
self.suffix = "\nKeys should be either str, int or bytes type"
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}") | 124 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py |
class DuplicatedKeysError(Exception):
"""Raise an error when duplicate key found."""
def __init__(self, key, duplicate_key_indices, fix_msg=""):
self.key = key
self.duplicate_key_indices = duplicate_key_indices
self.fix_msg = fix_msg
self.prefix = "Found multiple examples generated with the same key"
if len(duplicate_key_indices) <= 20:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
else:
self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
self.suffix = "\n" + fix_msg if fix_msg else ""
super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}") | 125 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py |
class KeyHasher:
"""KeyHasher class for providing hash using md5"""
def __init__(self, hash_salt: str):
self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
def hash(self, key: Union[str, int, bytes]) -> int:
"""Returns 128-bits unique hash of input key
Args:
key: the input key to be hashed (should be str, int or bytes)
Returns: 128-bit int hash key"""
md5 = self._split_md5.copy()
byte_key = _as_bytes(key)
md5.update(byte_key)
# Convert to integer with hexadecimal conversion
return int(md5.hexdigest(), 16) | 126 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/keyhash.py |
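A short usage sketch of the `KeyHasher` above (the import path is an internal module, shown only for illustration): the same salt and key always give the same 128-bit integer, which is what the duplicate-key detection relies on.

```py
from datasets.keyhash import KeyHasher  # internal utility defined above

hasher = KeyHasher(hash_salt="my_dataset_split")
assert hasher.hash("example-0") == hasher.hash("example-0")  # deterministic for a given salt and key
assert hasher.hash("example-0") != hasher.hash("example-1")  # distinct keys hash differently
assert 0 <= hasher.hash(b"example-0") < 2**128               # 128-bit integer
```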
class ParallelBackendConfig:
backend_name = None | 127 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/parallel/parallel.py |
class ParquetDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
self.builder = Parquet(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
hash=hash,
**kwargs, | 128 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
) | 128 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 128 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
class ParquetDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
storage_options: Optional[dict] = None,
**parquet_writer_kwargs,
):
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size or get_writer_batch_size(dataset.features)
self.storage_options = storage_options or {}
self.parquet_writer_kwargs = parquet_writer_kwargs
def write(self) -> int:
batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE | 129 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
return written
def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
"""Writes the pyarrow table as Parquet to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
_ = parquet_writer_kwargs.pop("path_or_buf", None)
schema = self.dataset.features.arrow_schema
writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) | 129 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
for offset in hf_tqdm(
range(0, len(self.dataset), batch_size),
unit="ba",
desc="Creating parquet from Arrow format",
):
batch = query_table(
table=self.dataset._data,
key=slice(offset, offset + batch_size),
indices=self.dataset._indices,
)
writer.write_table(batch)
written += batch.nbytes
writer.close()
return written | 129 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/parquet.py |
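ParquetDatasetWriter is what `Dataset.to_parquet` uses under the hood. A minimal round-trip sketch:

```py
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
num_bytes = ds.to_parquet("data.parquet")  # returns the number of bytes written, as in _write() above
reloaded = load_dataset("parquet", data_files="data.parquet", split="train")
print(num_bytes, reloaded.num_rows)
```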
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
num_proc: Optional[int] = None,
split: NamedSplit = Split.TRAIN,
**kwargs,
):
super().__init__(
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
split=split,
**kwargs,
) | 130 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/generator.py |
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.builder.config.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 130 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/generator.py |
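GeneratorDatasetInputStream is the reader behind `Dataset.from_generator`. A minimal sketch:

```py
from datasets import Dataset

def gen(n):
    for i in range(n):
        yield {"id": i, "text": f"example {i}"}

# gen_kwargs are forwarded to the generator, matching the builder arguments above.
ds = Dataset.from_generator(gen, gen_kwargs={"n": 5})
print(len(ds), ds[0])
```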
class CsvDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Csv(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
) | 131 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 131 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
class CsvDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_csv_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.storage_options = storage_options or {}
self.to_csv_kwargs = to_csv_kwargs
def write(self) -> int:
_ = self.to_csv_kwargs.pop("path_or_buf", None)
header = self.to_csv_kwargs.pop("header", True)
index = self.to_csv_kwargs.pop("index", False) | 132 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", **(self.storage_options or {})) as buffer:
written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
else:
written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
return written
def _batch_csv(self, args):
offset, header, index, to_csv_kwargs = args
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
csv_str = batch.to_pandas().to_csv(
path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
)
return csv_str.encode(self.encoding) | 132 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
"""Writes the pyarrow table as CSV to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating CSV from Arrow format",
):
csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
written += file_obj.write(csv_str) | 132 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for csv_str in hf_tqdm(
pool.imap(
self._batch_csv,
[(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating CSV from Arrow format",
):
written += file_obj.write(csv_str)
return written | 132 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/csv.py |
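The multiprocessing path above (one CSV chunk per batch via `pool.imap`) is exposed through `Dataset.to_csv`. A small sketch:

```py
from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(10_000)), "y": list(range(10_000))})
# Batches are converted to CSV in parallel worker processes, then written in order.
num_bytes = ds.to_csv("data.csv", num_proc=2, batch_size=1_000)
print(num_bytes)
```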
class TextDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Text(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
**kwargs,
) | 133 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/text.py |
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 133 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/text.py |
class SparkDatasetReader(AbstractDatasetReader):
"""A dataset reader that reads from a Spark DataFrame.
When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
provided. Streaming is not currently supported.
""" | 134 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/spark.py |
def __init__(
self,
df: pyspark.sql.DataFrame,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
streaming: bool = True,
cache_dir: str = None,
keep_in_memory: bool = False,
working_dir: str = None,
load_from_cache_file: bool = True,
file_format: str = "arrow",
**kwargs,
):
super().__init__(
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
**kwargs,
)
self._load_from_cache_file = load_from_cache_file
self._file_format = file_format
self.builder = Spark(
df=df,
features=features,
cache_dir=cache_dir,
working_dir=working_dir,
**kwargs,
) | 134 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/spark.py |
def read(self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=download_mode,
file_format=self._file_format,
)
return self.builder.as_dataset(split=self.split) | 134 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/spark.py |
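SparkDatasetReader backs `Dataset.from_spark`. A sketch assuming a local `pyspark` session is available:

```py
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label int")
# Materializes the DataFrame into an Arrow-backed Dataset (the non-streaming path above).
ds = Dataset.from_spark(df)
print(ds)
```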
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
) | 135 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py |
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 135 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py |
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written | 136 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py |
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0 | 136 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py |
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in hf_tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating SQL from Arrow format",
):
written += num_rows
return written | 136 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/sql.py |
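SqlDatasetWriter backs `Dataset.to_sql`. A minimal sketch with an in-process SQLite database:

```py
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("data.db")
num_rows = ds.to_sql("my_table", con)  # each batch goes through pandas.DataFrame.to_sql, as in _batch_sql() above
con.close()
print(num_rows)
```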
class JsonDatasetReader(AbstractDatasetReader):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
field: Optional[str] = None,
num_proc: Optional[int] = None,
**kwargs,
):
super().__init__(
path_or_paths,
split=split,
features=features,
cache_dir=cache_dir,
keep_in_memory=keep_in_memory,
streaming=streaming,
num_proc=num_proc,
**kwargs,
)
self.field = field
path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
self.builder = Json(
cache_dir=cache_dir,
data_files=path_or_paths,
features=features,
field=field,
**kwargs, | 137 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
) | 137 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
num_proc=self.num_proc,
)
dataset = self.builder.as_dataset(
split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset | 137 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
class JsonDatasetWriter:
def __init__(
self,
dataset: Dataset,
path_or_buf: Union[PathLike, BinaryIO],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
storage_options: Optional[dict] = None,
**to_json_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.path_or_buf = path_or_buf
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.encoding = "utf-8"
self.storage_options = storage_options or {}
self.to_json_kwargs = to_json_kwargs | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
def write(self) -> int:
_ = self.to_json_kwargs.pop("path_or_buf", None)
orient = self.to_json_kwargs.pop("orient", "records")
lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
self.to_json_kwargs["index"] = False
# Determine the default compression value based on self.path_or_buf type
default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
compression = self.to_json_kwargs.pop("compression", default_compression)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression") | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
if not lines and self.batch_size < self.dataset.num_rows:
raise NotImplementedError(
"Output JSON will not be formatted correctly when lines = False and batch_size < number of rows in the dataset. Use pandas.DataFrame.to_json() instead."
) | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(
self.path_or_buf, "wb", compression=compression, **(self.storage_options or {})
) as buffer:
written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead."
)
written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
return written
def _batch_json(self, args):
offset, orient, lines, to_json_kwargs = args | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def _write(
self,
file_obj: BinaryIO,
orient,
lines,
**to_json_kwargs,
) -> int:
"""Writes the pyarrow table as JSON lines to a binary file handle.
Caller is responsible for opening and closing the handle.
"""
written = 0 | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating json from Arrow format",
):
json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
written += file_obj.write(json_str)
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in hf_tqdm(
pool.imap(
self._batch_json,
[(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating json from Arrow format",
): | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
written += file_obj.write(json_str) | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
return written | 138 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/json.py |
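JsonDatasetWriter backs `Dataset.to_json`, which defaults to JSON Lines (orient="records", lines=True) as in write() above. A small round-trip sketch:

```py
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("data.jsonl")  # one JSON object per line by default
reloaded = load_dataset("json", data_files="data.jsonl", split="train")
print(reloaded[0])
```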
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass | 139 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/abc.py |
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
num_proc: Optional[int] = None,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.num_proc = num_proc
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass | 140 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/io/abc.py |
class Value:
"""
Scalar feature value of a particular data type.
The possible dtypes of `Value` are as follows:
- `null`
- `bool`
- `int8`
- `int16`
- `int32`
- `int64`
- `uint8`
- `uint16`
- `uint32`
- `uint64`
- `float16`
- `float32` (alias float)
- `float64` (alias double)
- `time32[(s|ms)]`
- `time64[(us|ns)]`
- `timestamp[(s|ms|us|ns)]`
- `timestamp[(s|ms|us|ns), tz=(tzstring)]`
- `date32`
- `date64`
- `duration[(s|ms|us|ns)]`
- `decimal128(precision, scale)`
- `decimal256(precision, scale)`
- `binary`
- `large_binary`
- `string`
- `large_string`
Args:
dtype (`str`):
Name of the data type.
Example:
```py
>>> from datasets import Features
>>> features = Features({'stars': Value(dtype='int32')})
>>> features
{'stars': Value(dtype='int32', id=None)}
```
""" | 141 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py |