def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
"""
Rename a column in the dataset, and move the features associated with the original column under the new column
name.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
original_column_name (`str`):
Name of the column to rename.
new_column_name (`str`):
New name for the column.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with a renamed column.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_column("text", "movie_review")
>>> next(iter(ds["train"]))
{'label': 1,
'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict(
{
k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
for k, dataset in self.items()
}
)
def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
"""
Rename several columns in the dataset, and move the features associated with the original columns under
the new column names.
The renaming is applied to all the datasets of the dataset dictionary.
Args:
column_mapping (`Dict[str, str]`):
A mapping of columns to rename to their new names.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with renamed columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
>>> next(iter(ds["train"]))
{'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'rating': 1}
```
"""
return IterableDatasetDict(
{k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
)
def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""
Remove one or several column(s) in the dataset and the features associated with them.
The removal is done on-the-fly on the examples when iterating over the dataset.
The removal is applied to all the datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to remove.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.remove_columns("label")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
"""Select one or several column(s) in the dataset and the features
associated to them. The selection is done on-the-fly on the examples
when iterating over the dataset. The selection is applied to all the
datasets of the dataset dictionary.
Args:
column_names (`Union[str, List[str]]`):
Name of the column(s) to keep.
Returns:
[`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds = ds.select("text")
>>> next(iter(ds["train"]))
{'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
"""
return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
"""Cast column to feature for decoding.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
column (`str`):
Column name.
feature ([`Feature`]):
Target feature.
Returns:
[`IterableDatasetDict`]
Example:
```py
>>> from datasets import load_dataset, ClassLabel
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='string', id=None)}
```
"""
return IterableDatasetDict(
{k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
)
def cast(
self,
features: Features,
) -> "IterableDatasetDict":
"""
Cast the dataset to a new set of features.
The type casting is applied to all the datasets of the dataset dictionary.
Args:
features (`Features`):
New features to cast the dataset to.
The name of the fields in the features must match the current column names.
The type of the data must also be convertible from one type to the other.
For non-trivial conversion, e.g. `string` <-> `ClassLabel`, you should use [`map`] to update the Dataset.
Returns:
[`IterableDatasetDict`]: A copy of the dataset with cast features.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", streaming=True)
>>> ds["train"].features
{'label': ClassLabel(names=['neg', 'pos'], id=None),
'text': Value(dtype='string', id=None)}
>>> new_features = ds["train"].features.copy()
>>> new_features['label'] = ClassLabel(names=['bad', 'good'])
>>> new_features['text'] = Value('large_string')
>>> ds = ds.cast(new_features)
>>> ds["train"].features
{'label': ClassLabel(names=['bad', 'good'], id=None),
'text': Value(dtype='large_string', id=None)}
```
"""
return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
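# A minimal sketch (not from the source) tying the methods above together:
# each call returns a new IterableDatasetDict, so the lazy, on-the-fly
# transformations can be chained before iterating.
from datasets import ClassLabel, load_dataset

ds = load_dataset("rotten_tomatoes", streaming=True)
ds = (
    ds.rename_column("text", "movie_review")
    .cast_column("label", ClassLabel(names=["bad", "good"]))
    .select_columns(["movie_review", "label"])
)
print(next(iter(ds["train"])))  # {'movie_review': '...', 'label': 1}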
class _TempCacheDir:
"""
A temporary directory for storing cached Arrow files with a cleanup that frees references to the Arrow files
before deleting the directory itself to avoid permission errors on Windows.
"""
def __init__(self):
self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX)
self._finalizer = weakref.finalize(self, self._cleanup)
def _cleanup(self):
for dset in get_datasets_with_cache_file_in_temp_dir():
dset.__del__()
if os.path.exists(self.name):
try:
shutil.rmtree(self.name)
except Exception as e:
raise OSError(
f"An error occured while trying to delete temporary cache directory {self.name}. Please delete it manually."
) from e
def cleanup(self):
if self._finalizer.detach():
self._cleanup()
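# Usage sketch for the class above: the directory lives until cleanup() is
# called explicitly or the object is garbage-collected; weakref.finalize
# guarantees _cleanup runs at most once either way.
tmp = _TempCacheDir()
cache_path = tmp.name  # a fresh directory under the system temp location
tmp.cleanup()          # drops dataset references, then deletes the directory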
class Hasher:
"""Hasher that accepts python objects as inputs."""
dispatch: Dict = {}
def __init__(self):
self.m = xxhash.xxh64()
@classmethod
def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str:
value = [value] if isinstance(value, bytes) else value
m = xxhash.xxh64()
for x in value:
m.update(x)
return m.hexdigest()
@classmethod
def hash(cls, value: Any) -> str:
return cls.hash_bytes(dumps(value))
def update(self, value: Any) -> None:
header_for_update = f"=={type(value)}=="
value_for_update = self.hash(value)
self.m.update(header_for_update.encode("utf8"))
self.m.update(value_for_update.encode("utf-8"))
def hexdigest(self) -> str:
return self.m.hexdigest()
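# A short sketch of the hasher above (assuming `dumps` can pickle the values):
# Hasher.hash returns a deterministic xxh64 hex digest, and an instance mixes
# successive values order-sensitively via update().
assert Hasher.hash({"a": 1}) == Hasher.hash({"a": 1})  # stable fingerprint

m = Hasher()
m.update("lowercase")        # the type header "==<class 'str'>==" is mixed in too
m.update({"batched": True})
fingerprint = m.hexdigest()  # 16 hex characters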
class SchemaInferenceError(ValueError):
pass
class TypedSequence:
"""
This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
More specifically it adds several features:
- Support extension types like ``datasets.features.Array2DExtensionType``:
By default pyarrow arrays don't return extension arrays. One has to call
``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
in order to get an extension array.
- Support for ``try_type`` parameter that can be used instead of ``type``:
When an array is transformed, we'd like to keep the same type as before if possible.
For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
of each column by default.
- Better error message when a pyarrow array overflows.
Example::
from datasets.features import Array2D, Array2DExtensionType, Value
from datasets.arrow_writer import TypedSequence
import pyarrow as pa
arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
assert arr.type == pa.int32()
arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
assert arr.type == pa.string()
arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
assert arr.type == Array2DExtensionType((1, 3), "int64")
table = pa.Table.from_pydict({
"image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
})
assert table["image"].type == Array2DExtensionType((1, 3), "int64")
""" | 39 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py |
def __init__(
self,
data: Iterable,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
optimized_int_type: Optional[FeatureType] = None,
):
if type is not None and try_type is not None:
raise ValueError("You cannot specify both type and try_type")
# set attributes
self.data = data
self.type = type
self.try_type = try_type # is ignored if it doesn't match the data
self.optimized_int_type = optimized_int_type
# when trying a type (is ignored if data is not compatible)
self.trying_type = self.try_type is not None
self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
# used to get back the inferred type after __arrow_array__() is called once
self._inferred_type = None
def get_inferred_type(self) -> FeatureType:
"""Return the inferred feature type.
This is done by converting the sequence to an Arrow array, and getting the corresponding
feature type.
Since building the Arrow array can be expensive, the value of the inferred type is cached
as soon as pa.array is called on the typed sequence.
Returns:
FeatureType: inferred feature type of the sequence.
"""
if self._inferred_type is None:
self._inferred_type = generate_from_arrow_type(pa.array(self).type)
return self._inferred_type
@staticmethod
def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
"""Implement type inference for custom objects like PIL.Image.Image -> Image type. | 39 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py |
This function is only used for custom python objects that can't be direclty passed to build
an Arrow array. In such cases is infers the feature type to use, and it encodes the data so
that they can be passed to an Arrow array.
Args:
data (Iterable): array of data to infer the type, e.g. a list of PIL images.
Returns:
Tuple[Iterable, Optional[FeatureType]]: a tuple with:
- the (possibly encoded) array, if the inferred feature type requires encoding
- the inferred feature type if the array is made of supported custom objects like
PIL images, else None.
"""
if config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
non_null_idx, non_null_value = first_non_null_value(data)
if isinstance(non_null_value, PIL.Image.Image):
return [Image().encode_example(value) if value is not None else None for value in data], Image()
return data, None
def __arrow_array__(self, type: Optional[pa.DataType] = None):
"""This function is called when calling pa.array(typed_sequence)""" | 39 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py |
if type is not None:
raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
del type # make sure we don't use it
data = self.data
# automatic type inference for custom objects
if self.type is None and self.try_type is None:
data, self._inferred_type = self._infer_custom_type_and_encode(data)
if self._inferred_type is None:
type = self.try_type if self.trying_type else self.type
else:
type = self._inferred_type
pa_type = get_nested_type(type) if type is not None else None
optimized_int_pa_type = (
get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
)
trying_cast_to_python_objects = False
try:
# custom pyarrow types
if isinstance(pa_type, _ArrayXDExtensionType):
storage = to_pyarrow_listarray(data, pa_type)
return pa.ExtensionArray.from_storage(pa_type, storage)
# efficient np array to pyarrow array
if isinstance(data, np.ndarray):
out = numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
out = list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
# use smaller integer precisions if possible
if self.trying_int_optimization:
if pa.types.is_int64(out.type):
out = out.cast(optimized_int_pa_type)
elif pa.types.is_list(out.type):
if pa.types.is_int64(out.type.value_type):
out = array_cast(out, pa.list_(optimized_int_pa_type))
elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
# otherwise we can finally use the user's type
elif type is not None:
# We use cast_array_to_feature to support casting to custom types like Audio and Image
# Also, when trying type "string", we don't want to convert integers or floats to "string".
# We only do it if trying_type is False - since this is what the user asks for.
out = cast_array_to_feature(
out, type, allow_primitive_to_str=not self.trying_type, allow_decimal_to_str=not self.trying_type
)
return out
except (
TypeError,
pa.lib.ArrowInvalid,
pa.lib.ArrowNotImplementedError,
) as e: # handle type errors and overflows
# Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
raise
if self.trying_type:
try: # second chance
if isinstance(data, np.ndarray):
return numpy_to_pyarrow_listarray(data)
elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
return list_of_np_array_to_pyarrow_listarray(data)
else:
trying_cast_to_python_objects = True
return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
except pa.lib.ArrowInvalid as e:
if "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(
f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
)
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(
cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
)
if type is not None:
out = cast_array_to_feature(
out, type, allow_primitive_to_str=True, allow_decimal_to_str=True
)
return out
else:
raise
elif "overflow" in str(e):
raise OverflowError(
f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
) from None
elif self.trying_int_optimization and "not in range" in str(e):
optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
return out
elif trying_cast_to_python_objects and "Could not convert" in str(e):
out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
if type is not None:
out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True)
return out
else:
raise
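# A short sketch of the custom-type inference above (requires Pillow installed
# and imported, so that "PIL" is in sys.modules): a list whose first non-null
# value is a PIL image is encoded element-wise and typed as Image().
import PIL.Image

images = [PIL.Image.new("RGB", (2, 2)), None]
encoded, feature = TypedSequence._infer_custom_type_and_encode(images)
# feature == Image(); encoded[0] is the dict produced by Image().encode_example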
class OptimizedTypedSequence(TypedSequence):
def __init__(
self,
data,
type: Optional[FeatureType] = None,
try_type: Optional[FeatureType] = None,
col: Optional[str] = None,
optimized_int_type: Optional[FeatureType] = None,
):
optimized_int_type_by_col = {
"attention_mask": Value("int8"), # binary tensor
"special_tokens_mask": Value("int8"),
"input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
"token_type_ids": Value(
"int8"
), # binary mask; some (XLNetModel) use an additional token represented by a 2
}
if type is None and try_type is None:
optimized_int_type = optimized_int_type_by_col.get(col, None)
super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
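# A small illustration of the per-column downcasting above: with no explicit
# type/try_type, known token columns are stored with a smaller integer type,
# silently falling back to int64 if a value is out of range.
import pyarrow as pa

assert pa.array(OptimizedTypedSequence([0, 1, 1, 0], col="attention_mask")).type == pa.int8()
assert pa.array(OptimizedTypedSequence([101, 2054, 102], col="input_ids")).type == pa.int32()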
class ArrowWriter:
"""Shuffles and writes Examples to Arrow files."""
_WRITER_CLASS = pa.RecordBatchStreamWriter
def __init__(
self,
schema: Optional[pa.Schema] = None,
features: Optional[Features] = None,
path: Optional[str] = None,
stream: Optional[pa.NativeFile] = None,
fingerprint: Optional[str] = None,
writer_batch_size: Optional[int] = None,
hash_salt: Optional[str] = None,
check_duplicates: Optional[bool] = False,
disable_nullable: bool = False,
update_features: bool = False,
with_metadata: bool = True,
unit: str = "examples",
embed_local_files: bool = False,
storage_options: Optional[dict] = None,
):
if path is None and stream is None:
raise ValueError("At least one of path and stream must be provided.")
if features is not None:
self._features = features
self._schema = None
elif schema is not None:
self._schema: pa.Schema = schema
self._features = Features.from_arrow_schema(self._schema)
else:
self._features = None
self._schema = None
if hash_salt is not None:
# Create KeyHasher instance using split name as hash salt
self._hasher = KeyHasher(hash_salt)
else:
self._hasher = KeyHasher("")
self._check_duplicates = check_duplicates
self._disable_nullable = disable_nullable
if stream is None:
fs, path = url_to_fs(path, **(storage_options or {}))
self._fs: fsspec.AbstractFileSystem = fs
self._path = path if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(path)
self.stream = self._fs.open(path, "wb")
self._closable_stream = True
else:
self._fs = None
self._path = None
self.stream = stream
self._closable_stream = False
self.fingerprint = fingerprint
self.disable_nullable = disable_nullable
self.writer_batch_size = (
writer_batch_size or get_writer_batch_size(self._features) or config.DEFAULT_MAX_BATCH_SIZE
)
self.update_features = update_features
self.with_metadata = with_metadata
self.unit = unit
self.embed_local_files = embed_local_files
self._num_examples = 0
self._num_bytes = 0
self.current_examples: List[Tuple[Dict[str, Any], str]] = []
self.current_rows: List[pa.Table] = []
self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
self.hkey_record = []
def __len__(self):
"""Return the number of writed and staged examples"""
return self._num_examples + len(self.current_examples) + len(self.current_rows)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
# Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
if self.pa_writer: # it might be None
try:
self.pa_writer.close()
except Exception: # pyarrow.lib.ArrowInvalid, OSError
pass
if self._closable_stream and not self.stream.closed:
self.stream.close() # This also closes self.pa_writer if it is opened
def _build_writer(self, inferred_schema: pa.Schema):
schema = self.schema
inferred_features = Features.from_arrow_schema(inferred_schema)
if self._features is not None:
if self.update_features: # keep original features if they match, or update them
fields = {field.name: field for field in self._features.type}
for inferred_field in inferred_features.type:
name = inferred_field.name
if name in fields:
if inferred_field == fields[name]:
inferred_features[name] = self._features[name]
self._features = inferred_features
schema: pa.Schema = inferred_schema
else:
self._features = inferred_features
schema: pa.Schema = inferred_features.arrow_schema
if self.disable_nullable:
schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
if self.with_metadata:
schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
else:
schema = schema.with_metadata({})
self._schema = schema
self.pa_writer = self._WRITER_CLASS(self.stream, schema)
@property
def schema(self):
_schema = (
self._schema
if self._schema is not None
else (pa.schema(self._features.type) if self._features is not None else None)
)
if self._disable_nullable and _schema is not None:
_schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
return _schema if _schema is not None else []
@staticmethod
def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
info_as_dict = asdict(info)
metadata = {}
metadata["info"] = {key: info_as_dict[key] for key in info_keys}
if fingerprint is not None:
metadata["fingerprint"] = fingerprint
return {"huggingface": json.dumps(metadata)} | 41 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py |
def write_examples_on_file(self):
"""Write stored examples from the write-pool of examples. It makes a table out of the examples and write it."""
if not self.current_examples:
return
# preserve the order of the columns
if self.schema:
schema_cols = set(self.schema.names)
examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
common_cols = [col for col in self.schema.names if col in examples_cols]
extra_cols = [col for col in examples_cols if col not in schema_cols]
cols = common_cols + extra_cols
else:
cols = list(self.current_examples[0][0])
batch_examples = {}
for col in cols:
# We use row[0][col] since current_examples contains (example, key) tuples.
# Moreover, examples could be Arrow arrays of 1 element.
# This can happen in `.map()` when we want to re-write the same Arrow data
if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
arrays = [row[0][col] for row in self.current_examples]
arrays = [
chunk
for array in arrays
for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
]
batch_examples[col] = pa.concat_arrays(arrays)
else:
batch_examples[col] = [
row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
for row in self.current_examples
]
self.write_batch(batch_examples=batch_examples)
self.current_examples = []
def write_rows_on_file(self):
"""Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
if not self.current_rows:
return
table = pa.concat_tables(self.current_rows)
self.write_table(table)
self.current_rows = []
def write(
self,
example: Dict[str, Any],
key: Optional[Union[str, int, bytes]] = None,
writer_batch_size: Optional[int] = None,
):
"""Add a given (Example,Key) pair to the write-pool of examples which is written to file. | 41 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_writer.py |
Args:
example: the Example to add.
key: Optional, a unique identifier (str, int or bytes) associated with each example
"""
# Utilize the keys and duplicate checking when `self._check_duplicates` is True
if self._check_duplicates:
# Create unique hash from key and store as (key, example) pairs
hash = self._hasher.hash(key)
self.current_examples.append((example, hash))
# Maintain record of keys and their respective hashes for checking duplicates
self.hkey_record.append((hash, key))
else:
# Store example as a tuple so as to keep the structure of `self.current_examples` uniform
self.current_examples.append((example, ""))
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
def check_duplicate_keys(self):
"""Raises error if duplicates found in a batch"""
tmp_record = set()
for hash, key in self.hkey_record:
if hash in tmp_record:
duplicate_key_indices = [
str(self._num_examples + index)
for index, (duplicate_hash, _) in enumerate(self.hkey_record)
if duplicate_hash == hash
]
raise DuplicatedKeysError(key, duplicate_key_indices)
else:
tmp_record.add(hash)
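# Sketch of the duplicate-key guard (hypothetical path): with
# check_duplicates=True, two examples sharing a key collide on their hash, and
# the check (run per full batch and in finalize()) raises DuplicatedKeysError.
writer = ArrowWriter(path="dup_check.arrow", check_duplicates=True)
writer.write({"text": "a"}, key=0)
writer.write({"text": "b"}, key=0)
writer.finalize()  # raises DuplicatedKeysError listing the duplicate indices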
def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
"""Add a given single-row Table to the write-pool of rows which is written to file.
Args:
row: the row to add.
"""
if len(row) != 1:
raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
self.current_rows.append(row)
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
self.write_rows_on_file()
def write_batch(
self,
batch_examples: Dict[str, List],
writer_batch_size: Optional[int] = None,
):
"""Write a batch of Example to file.
Ignores the batch if it appears to be empty,
preventing a potential schema update of unknown types.
Args:
batch_examples: the batch of examples to add.
"""
if batch_examples and len(next(iter(batch_examples.values()))) == 0:
return
features = None if self.pa_writer is None and self.update_features else self._features
try_features = self._features if self.pa_writer is None and self.update_features else None
arrays = []
inferred_features = Features()
# preserve the order of the columns
if self.schema:
schema_cols = set(self.schema.names)
batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
common_cols = [col for col in self.schema.names if col in batch_cols]
extra_cols = [col for col in batch_cols if col not in schema_cols]
cols = common_cols + extra_cols
else:
cols = list(batch_examples)
for col in cols:
col_values = batch_examples[col]
col_type = features[col] if features else None
if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
arrays.append(array)
inferred_features[col] = generate_from_arrow_type(col_values.type)
else:
col_try_type = try_features[col] if try_features is not None and col in try_features else None
typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
arrays.append(pa.array(typed_sequence))
inferred_features[col] = typed_sequence.get_inferred_type()
schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
pa_table = pa.Table.from_arrays(arrays, schema=schema)
self.write_table(pa_table, writer_batch_size)
def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
"""Write a Table to file.
Args:
pa_table: the Table to add.
"""
if writer_batch_size is None:
writer_batch_size = self.writer_batch_size
if self.pa_writer is None:
self._build_writer(inferred_schema=pa_table.schema)
pa_table = pa_table.combine_chunks()
pa_table = table_cast(pa_table, self._schema)
if self.embed_local_files:
pa_table = embed_table_storage(pa_table)
self._num_bytes += pa_table.nbytes
self._num_examples += pa_table.num_rows
self.pa_writer.write_table(pa_table, writer_batch_size)
def finalize(self, close_stream=True):
self.write_rows_on_file()
# In case fewer than writer_batch_size examples remain when the user calls finalize()
if self._check_duplicates:
self.check_duplicate_keys()
# Re-initializing to empty list for next batch
self.hkey_record = []
self.write_examples_on_file()
# If schema is known, infer features even if no examples were written
if self.pa_writer is None and self.schema:
self._build_writer(self.schema)
if self.pa_writer is not None:
self.pa_writer.close()
self.pa_writer = None
if close_stream:
self.stream.close()
else:
if close_stream:
self.stream.close()
raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
logger.debug(
f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
)
return self._num_examples, self._num_bytes
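# A minimal end-to-end sketch of ArrowWriter (assumes a writable local path):
# examples are buffered, flushed every writer_batch_size examples, and
# finalize() returns the totals.
from datasets import Features, Value

with ArrowWriter(features=Features({"text": Value("string")}), path="data.arrow") as writer:
    writer.write({"text": "hello"})
    writer.write({"text": "world"})
    num_examples, num_bytes = writer.finalize()  # -> (2, <bytes written>)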
class ParquetWriter(ArrowWriter):
_WRITER_CLASS = pq.ParquetWriter
class DatasetNotOnHfGcsError(ConnectionError):
"""When you can't get the dataset from the Hf google cloud storage"""
pass | 43 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
class MissingFilesOnHfGcsError(ConnectionError):
"""When some files are missing on the Hf oogle cloud storage"""
pass | 44 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
@dataclass
class FileInstructions:
"""The file instructions associated with a split ReadInstruction.
Attributes:
num_examples: `int`, The total number of examples
file_instructions: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicate which examples to read in the file: `ds.slice(skip, take)`
"""
num_examples: int
file_instructions: List[dict]
class BaseReader:
"""
Build a Dataset object out of Instruction instance(s).
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where tfrecords are stored.
info (DatasetInfo): info about the dataset.
"""
self._path: str = path
self._info: Optional["DatasetInfo"] = info
self._filetype_suffix: Optional[str] = None
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
raise NotImplementedError
def _read_files(self, files, in_memory=False) -> Table:
"""Returns Dataset for given file instructions. | 46 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the absolute path, not the relative one.
skip/take indicate which examples to read in the file: `ds.slice(skip, take)`
in_memory (bool, default False): Whether to copy the data in-memory.
"""
if len(files) == 0 or not all(isinstance(f, dict) for f in files):
raise ValueError("please provide valid file informations")
files = copy.deepcopy(files)
for f in files:
f["filename"] = os.path.join(self._path, f["filename"]) | 46 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
pa_tables = thread_map(
partial(self._get_table_from_filename, in_memory=in_memory),
files,
tqdm_class=hf_tqdm,
desc="Loading dataset shards",
# set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
disable=len(files) <= 16 or None,
)
pa_tables = [t for t in pa_tables if len(t) > 0]
if not pa_tables and (self._info is None or self._info.features is None):
raise ValueError(
"Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
)
pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
return pa_table
def get_file_instructions(self, name, instruction, split_infos):
"""Return list of dict {'filename': str, 'skip': int, 'take': int}"""
file_instructions = make_file_instructions(
name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
)
files = file_instructions.file_instructions
return files
def read(
self,
name,
instructions,
split_infos,
in_memory=False,
):
"""Returns Dataset instance(s).
Args:
name (str): name of the dataset.
instructions (ReadInstruction): instructions to read.
Instructions can be a string, which is then passed to the
ReadInstruction constructor as-is.
split_infos (list of SplitInfo proto): the available splits for dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a single Dataset instance.
"""
files = self.get_file_instructions(name, instructions, split_infos)
if not files:
msg = f'Instruction "{instructions}" corresponds to no data!'
raise ValueError(msg)
return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
def read_files(
self,
files: List[dict],
original_instructions: Union[None, "ReadInstruction", "Split"] = None,
in_memory=False,
):
"""Returns single Dataset instance for the set of file instructions. | 46 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
Args:
files: List[dict(filename, skip, take)], the files information.
The filenames contain the relative path, not the absolute one.
skip/take indicate which examples to read in the file: `ds.skip().take()`
original_instructions: store the original instructions used to build the dataset split in the dataset.
in_memory (bool, default False): Whether to copy the data in-memory.
Returns:
kwargs to build a Dataset instance.
"""
# Prepend path to filename
pa_table = self._read_files(files, in_memory=in_memory)
# If original_instructions is not None, convert it to a human-readable NamedSplit
if original_instructions is not None:
from .splits import Split # noqa
split = Split(str(original_instructions))
else:
split = None
dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
return dataset_kwargs
class ArrowReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ArrowReader.
Args:
path (str): path where Arrow files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "arrow" | 47 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
table = ArrowReader.read_table(filename, in_memory=in_memory)
if take == -1:
take = len(table) - skip
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(table)):
table = table.slice(skip, take)
return table
@staticmethod
def read_table(filename, in_memory=False) -> Table:
"""
Read table from file.
Args:
filename (str): File name of the table.
in_memory (bool, default=False): Whether to copy the data in-memory.
Returns:
pyarrow.Table
"""
table_cls = InMemoryTable if in_memory else MemoryMappedTable
return table_cls.from_file(filename)
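# Usage sketch (hypothetical file name): the Arrow file is memory-mapped by
# default, so reading costs almost no RAM; in_memory=True copies it instead.
table = ArrowReader.read_table("data.arrow", in_memory=False)  # MemoryMappedTable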
class ParquetReader(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This Reader uses memory mapping on parquet files.
"""
def __init__(self, path: str, info: Optional["DatasetInfo"]):
"""Initializes ParquetReader.
Args:
path (str): path where parquet files are stored.
info (DatasetInfo): info about the dataset.
"""
super().__init__(path, info)
self._filetype_suffix = "parquet" | 48 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
def _get_table_from_filename(self, filename_skip_take, **kwargs):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
# Parquet read_table always loads data in memory, regardless of memory_map
pa_table = pq.read_table(filename, memory_map=True)
# here we don't want to slice an empty table, or it may segfault
if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
pa_table = pa_table.slice(skip, take)
return pa_table
@dataclass
class _AbsoluteInstruction:
"""A machine-friendly slice with defined absolute positive boundaries."""
splitname: str
from_: int # uint (starting index).
to: int # uint (ending index).
@dataclass(frozen=True)
class _RelativeInstruction:
"""Represents a single parsed slicing instruction; can use % and negatives."""
splitname: str
from_: Optional[int] = None # int (starting index) or None if no lower boundary.
to: Optional[int] = None # int (ending index) or None if no upper boundary.
unit: Optional[str] = None
rounding: Optional[str] = None
def __post_init__(self):
if self.unit is not None and self.unit not in ["%", "abs"]:
raise ValueError("unit must be either % or abs")
if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
raise ValueError("rounding must be either closest or pct1_dropremainder")
if self.unit != "%" and self.rounding is not None:
raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
if self.unit == "%" and self.to is not None and abs(self.to) > 100:
raise ValueError("Percent slice boundaries must be > -100 and < 100.")
# Update via __dict__ due to instance being "frozen"
self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding | 50 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
class ReadInstruction:
"""Reading instruction for a dataset.
Examples::
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%'))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%]+train[1:-1]'))
ds = datasets.load_dataset('mnist', split=(
datasets.ReadInstruction('test', to=33, unit='%') +
datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
# The following lines are equivalent:
ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
'test[:33%](pct1_dropremainder)'))
ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
# 10-fold validation:
tests = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
for k in range(0, 100, 10)])
trains = datasets.load_dataset(
'mnist',
[datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
for k in range(0, 100, 10)])
"""
def _init(self, relative_instructions):
# Private initializer.
self._relative_instructions = relative_instructions
@classmethod
def _read_instruction_from_relative_instructions(cls, relative_instructions):
"""Returns ReadInstruction obj initialized with relative_instructions."""
# Use __new__ to bypass __init__, which is used by the public API and is not convenient here.
result = cls.__new__(cls)
result._init(relative_instructions) # pylint: disable=protected-access
return result
def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
"""Initialize ReadInstruction. | 51 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
Args:
split_name (str): name of the split to read. Eg: 'train'.
rounding (str, optional): The rounding behaviour to use when percent slicing is
used. Ignored when slicing with absolute indices.
Possible values:
- 'closest' (default): The specified percentages are rounded to the
closest value. Use this if you want specified percents to be as
exact as possible.
- 'pct1_dropremainder': the specified percentages are treated as
multiple of 1%. Use this option if you want consistency. Eg:
len(5%) == 5 * len(1%).
Using this option, one might not be able to use the full set of
examples, if the number of those is not a multiple of 100.
from_ (int):
to (int): alternative way of specifying slicing boundaries. If any of
{from_, to, unit} argument is used, slicing cannot be specified as
string.
unit (str): optional, one of:
'%': to set the slicing unit as percents of the split size.
'abs': to set the slicing unit as absolute numbers.
"""
# This constructor is not always called. See factory method
# `_read_instruction_from_relative_instructions`. Common init instructions
# MUST be placed in the _init method.
self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
@classmethod
def from_spec(cls, spec):
"""Creates a `ReadInstruction` instance out of a string spec.
Args:
spec (`str`):
Split(s) + optional slice(s) to read + optional rounding
if percents are used as the slicing unit. A slice can be specified
using absolute numbers (`int`) or percentages (`int`).
Examples:
```
test: test split.
test + validation: test split + validation split.
test[10:]: test split, minus its first 10 records.
test[:10%]: first 10% records of test split.
test[:20%](pct1_dropremainder): first 20% of records, rounded with the pct1_dropremainder rounding.
test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
```
Returns:
ReadInstruction instance.
"""
spec = str(spec) # Need to convert to str in case of NamedSplit instance.
subs = _ADDITION_SEP_RE.split(spec)
if not subs:
raise ValueError(f"No instructions could be built out of {spec}")
instruction = _str_to_read_instruction(subs[0])
return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
def to_spec(self):
rel_instr_specs = []
for rel_instr in self._relative_instructions:
rel_instr_spec = rel_instr.splitname
if rel_instr.from_ is not None or rel_instr.to is not None:
from_ = rel_instr.from_
to = rel_instr.to
unit = rel_instr.unit
rounding = rel_instr.rounding
unit = unit if unit == "%" else ""
from_ = str(from_) + unit if from_ is not None else ""
to = str(to) + unit if to is not None else ""
slice_str = f"[{from_}:{to}]"
rounding_str = (
f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
)
rel_instr_spec += slice_str + rounding_str
rel_instr_specs.append(rel_instr_spec)
return "+".join(rel_instr_specs) | 51 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
def __add__(self, other):
"""Returns a new ReadInstruction obj, result of appending other to self."""
if not isinstance(other, ReadInstruction):
msg = "ReadInstruction can only be added to another ReadInstruction obj."
raise TypeError(msg)
self_ris = self._relative_instructions
other_ris = other._relative_instructions # pylint: disable=protected-access
if (
self_ris[0].unit != "abs"
and other_ris[0].unit != "abs"
and self._relative_instructions[0].rounding != other_ris[0].rounding
):
raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
return self._read_instruction_from_relative_instructions(self_ris + other_ris)
def __str__(self):
return self.to_spec()
def __repr__(self):
return f"ReadInstruction({self._relative_instructions})" | 51 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_reader.py |
def to_absolute(self, name2len):
"""Translate instruction into a list of absolute instructions.
Those absolute instructions are then to be added together.
Args:
name2len (`dict`):
Associating split names to number of examples.
Returns:
list of _AbsoluteInstruction instances (corresponds to the + in spec).
"""
return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
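# Sketch of the translation (done by the _rel_to_abs_instr helper): with 100
# train examples, "train[:50%]" resolves to the absolute rows [0, 50).
abs_instructions = ReadInstruction.from_spec("train[:50%]").to_absolute({"train": 100})
# -> [_AbsoluteInstruction(splitname="train", from_=0, to=50)]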
class InvalidConfigName(ValueError):
pass
@dataclass
class BuilderConfig:
"""Base class for `DatasetBuilder` data configuration.
`DatasetBuilder` subclasses with data configuration options should subclass
`BuilderConfig` and add their own properties.
Attributes:
name (`str`, defaults to `default`):
The name of the configuration.
version (`Version` or `str`, defaults to `0.0.0`):
The version of the configuration.
data_dir (`str`, *optional*):
Path to the directory containing the source data.
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
description (`str`, *optional*):
A human description of the configuration.
"""
name: str = "default"
version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
data_dir: Optional[str] = None
data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
description: Optional[str] = None
def __post_init__(self):
# The config name is used to name the cache directory.
for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
if invalid_char in self.name:
raise InvalidConfigName(
f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
f"They could create issues when creating a directory for this config on Windows filesystem."
)
if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}") | 53 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
def __eq__(self, o):
# we need to override the default dataclass __eq__ since it doesn't check for
# other attributes than the ones of the signature.
if set(self.__dict__.keys()) != set(o.__dict__.keys()):
return False
return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
def create_config_id(
self,
config_kwargs: dict,
custom_features: Optional[Features] = None,
) -> str:
"""
The config id is used to build the cache directory.
By default it is equal to the config name.
However, the name of a config is not sufficient to have a unique identifier for the dataset being generated
since it doesn't take into account:
- the config kwargs that can be used to overwrite attributes
- the custom features used to write the dataset
- the data_files for json/text/csv/pandas datasets
Therefore the config id is just the config name with an optional suffix based on these.
"""
# Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
suffix: Optional[str] = None
config_kwargs_to_add_to_suffix = config_kwargs.copy()
# name and version are already used to build the cache directory
config_kwargs_to_add_to_suffix.pop("name", None)
config_kwargs_to_add_to_suffix.pop("version", None)
# data dir handling (when specified it points to the manually downloaded data):
# it was previously ignored before the introduction of config id because we didn't want
# to change the config name. Now it's fine to take it into account for the config id.
# config_kwargs_to_add_to_suffix.pop("data_dir", None)
if "data_dir" in config_kwargs_to_add_to_suffix:
if config_kwargs_to_add_to_suffix["data_dir"] is None: | 53 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
config_kwargs_to_add_to_suffix.pop("data_dir", None)
else:
# canonicalize the data dir to avoid two paths to the same location having different
# hashes
data_dir = config_kwargs_to_add_to_suffix["data_dir"]
data_dir = os.path.normpath(data_dir)
config_kwargs_to_add_to_suffix["data_dir"] = data_dir
if config_kwargs_to_add_to_suffix:
# we don't care about the order of the kwargs
config_kwargs_to_add_to_suffix = {
k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
}
if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
suffix = ",".join(
str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
)
if len(suffix) > 32: # hash if too long
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
else:
suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
if custom_features is not None:
m = Hasher()
if suffix:
m.update(suffix)
m.update(custom_features)
suffix = m.hexdigest()
if suffix:
config_id = self.name + "-" + suffix
if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
config_id = self.name + "-" + Hasher.hash(suffix)
return config_id
else:
return self.name
def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
if isinstance(self.data_files, DataFilesPatternsDict):
base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
self.data_files = self.data_files.resolve(base_path, download_config)
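# Illustration with a hypothetical kwarg: short primitive config kwargs are
# URL-quoted into a readable suffix, while long or non-primitive ones (and any
# custom features) are hashed instead.
cfg = BuilderConfig(name="default")
assert cfg.create_config_id({"sep": "\t"}) == "default-sep=%09"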
class DatasetBuilder:
"""Abstract base class for all datasets.
`DatasetBuilder` has 3 key methods:
- [`DatasetBuilder.info`]: Documents the dataset, including feature
names, types, shapes, version, splits, citation, etc.
- [`DatasetBuilder.download_and_prepare`]: Downloads the source data
and writes it to disk.
- [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
Some `DatasetBuilder`s expose multiple variants of the
dataset by defining a [`BuilderConfig`] subclass and accepting a
config object (or name) on construction. Configurable datasets expose a
pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
Args:
cache_dir (`str`, *optional*):
Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
dataset_name (`str`, *optional*):
Name of the dataset, if different from the builder name. Useful for packaged builders
like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
that use the same packaged builder.
config_name (`str`, *optional*):
Name of the dataset configuration.
It affects the data generated on disk. Different configurations will have their own subdirectories and
versions.
If not provided, the default configuration is used (if it exists).
<Added version="2.3.0">
Parameter `name` was renamed to `config_name`.
</Added>
hash (`str`, *optional*):
Hash specific to the dataset code. Used to update the caching directory when the
dataset loading script code is updated (to avoid reusing old data).
The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
base_path (`str`, *optional*):
Base path for relative paths that are used to download files.
This can be a remote URL.
features ([`Features`], *optional*):
Features types to use with this dataset.
It can be used to change the [`Features`] types of a dataset, for example.
token (`str` or `bool`, *optional*):
String or boolean to use as Bearer token for remote files on the
Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
repo_id (`str`, *optional*):
ID of the dataset repository. | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
data_files (`str` or `Sequence` or `Mapping`, *optional*):
Path(s) to source data file(s).
For builders like "csv" or "json" that need the user to specify data files. They can be either
local or remote files. For convenience, you can use a `DataFilesDict`.
data_dir (`str`, *optional*):
Path to directory containing source data file(s).
Use only if `data_files` is not passed, in which case it is equivalent to passing
`os.path.join(data_dir, "**")` as `data_files`.
For builders that require manual download, it must be the path to the local directory containing the
manually downloaded data.
storage_options (`dict`, *optional*): | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
Key/value pairs to be passed on to the dataset file-system backend, if any.
writer_batch_size (`int`, *optional*):
Batch size used by the ArrowWriter.
It defines the number of samples that are kept in memory before writing them
and also the length of the arrow chunks.
None means that the ArrowWriter will use its default value.
**config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
configuration class is [`BuilderConfig`] or a subclass of it.
""" | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
# Default version, set in BuilderConfig
VERSION = None
# Class for the builder config.
BUILDER_CONFIG_CLASS = BuilderConfig
# Named configurations that modify the data generated by download_and_prepare.
BUILDER_CONFIGS = []
# Optional default config name to be used when name is None
DEFAULT_CONFIG_NAME = None
# Default batch size used by the ArrowWriter
# It defines the number of samples that are kept in memory before writing them
# and also the length of the arrow chunks
# None means that the ArrowWriter will use its default value
DEFAULT_WRITER_BATCH_SIZE = None | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
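A hedged sketch of a custom builder wiring these class attributes; the builder, its configs, and the toy data are hypothetical:

```py
import datasets

class MyDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = datasets.BuilderConfig
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="clean", version=datasets.Version("1.0.0")),
        datasets.BuilderConfig(name="raw", version=datasets.Version("1.0.0")),
    ]
    DEFAULT_CONFIG_NAME = "clean"    # used when no config name is passed
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        return datasets.DatasetInfo(features=datasets.Features({"text": datasets.Value("string")}))

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _generate_examples(self):
        for i, text in enumerate(["hello", "world"]):
            yield i, {"text": text}
```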
def __init__(
self,
cache_dir: Optional[str] = None,
dataset_name: Optional[str] = None,
config_name: Optional[str] = None,
hash: Optional[str] = None,
base_path: Optional[str] = None,
info: Optional[DatasetInfo] = None,
features: Optional[Features] = None,
token: Optional[Union[bool, str]] = None,
repo_id: Optional[str] = None,
data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
data_dir: Optional[str] = None,
storage_options: Optional[dict] = None,
writer_batch_size: Optional[int] = None,
**config_kwargs,
):
# DatasetBuilder name
self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
self.hash: Optional[str] = hash
self.base_path = base_path
self.token = token
self.repo_id = repo_id
self.storage_options = storage_options or {} | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
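The builder name is derived from the module's file name via `camelcase_to_snakecase`. A quick check (the import path is an internal detail and may move between versions):

```py
from datasets.naming import camelcase_to_snakecase

print(camelcase_to_snakecase("RottenTomatoes"))  # rotten_tomatoes
```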
if data_files is not None and not isinstance(data_files, DataFilesDict):
data_files = DataFilesDict.from_patterns(
sanitize_patterns(data_files),
base_path=base_path,
download_config=DownloadConfig(token=token, storage_options=self.storage_options),
)
# Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
config_kwargs["features"] = features
if data_files is not None:
config_kwargs["data_files"] = data_files
if data_dir is not None:
config_kwargs["data_dir"] = data_dir
self.config_kwargs = config_kwargs
self.config, self.config_id = self._create_builder_config(
config_name=config_name,
custom_features=features,
**config_kwargs,
) | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
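The `sanitize_patterns` helper normalizes the `data_files` shorthands (string, list, or mapping) into a split-to-patterns mapping. A hedged illustration (internal helper, so the import path and exact output may vary):

```py
from datasets.data_files import sanitize_patterns

print(sanitize_patterns("data/*.csv"))        # {'train': ['data/*.csv']}
print(sanitize_patterns(["a.csv", "b.csv"]))  # {'train': ['a.csv', 'b.csv']}
print(sanitize_patterns({"train": "tr.csv", "test": "te.csv"}))
# {'train': ['tr.csv'], 'test': ['te.csv']}
```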
# Prepare info: DatasetInfo is a standardized dataclass shared across all datasets
# Prefill the DatasetInfo
if info is None:
# TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
info = self.get_exported_dataset_info()
info.update(self._info())
info.builder_name = self.name
info.dataset_name = self.dataset_name
info.config_name = self.config.name
info.version = self.config.version
self.info = info
# update info with user specified infos
if features is not None:
self.info.features = features | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
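A hedged example of this `features` override in practice through `load_dataset`, which passes `features` down to the builder ("reviews.csv" is a hypothetical file with "text" and "label" columns):

```py
from datasets import ClassLabel, Features, Value, load_dataset

# cast the label column to a ClassLabel while loading
features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
ds = load_dataset("csv", data_files="reviews.csv", features=features)
```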
# Prepare data dirs:
# cache_dir can be a remote bucket on GCS or S3
self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
self._cache_dir_root = (
self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
)
self._cache_downloaded_dir = (
posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
if cache_dir
else str(config.DOWNLOADED_DATASETS_PATH)
)
self._cache_downloaded_dir = (
self._cache_downloaded_dir
if is_remote_url(self._cache_downloaded_dir)
else os.path.expanduser(self._cache_downloaded_dir)
)
# In case there exists a legacy cache directory
self._legacy_relative_data_dir = None | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
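The default cache root used here can be redirected with the `HF_DATASETS_CACHE` environment variable, set before `datasets` is imported (the target path below is hypothetical):

```py
import os

os.environ["HF_DATASETS_CACHE"] = "/mnt/big_disk/hf_datasets"

import datasets.config
print(datasets.config.HF_DATASETS_CACHE)
```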
self._cache_dir = self._build_cache_dir()
if not is_remote_url(self._cache_dir_root):
os.makedirs(self._cache_dir_root, exist_ok=True)
lock_path = os.path.join(
self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
)
with FileLock(lock_path):
if os.path.exists(self._cache_dir): # check if data exist
if len(os.listdir(self._cache_dir)) > 0:
if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
logger.info("Overwrite dataset info from restored data version if exists.")
self.info = DatasetInfo.from_directory(self._cache_dir)
else: # dir exists but no data, remove the empty dir as data aren't available anymore
logger.warning( | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
)
os.rmdir(self._cache_dir) | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
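A minimal sketch of the locking pattern above, using the `filelock` package that the library's `FileLock` builds on (all paths are hypothetical):

```py
import os
from filelock import FileLock

cache_root = os.path.expanduser("~/.cache/my_app")
cache_dir = os.path.join(cache_root, "some_dataset", "1.0.0")
os.makedirs(cache_root, exist_ok=True)
# one lock file per cache dir, stored flat under the root (mirrors the code above)
lock_path = os.path.join(cache_root, cache_dir.replace(os.sep, "_") + ".lock")
with FileLock(lock_path):
    # safe to inspect or clean the cache directory without racing other processes
    if os.path.isdir(cache_dir) and not os.listdir(cache_dir):
        os.rmdir(cache_dir)  # empty dir: data isn't available anymore
```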
# Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
self._output_dir = self._cache_dir
self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
# Set download manager
self.dl_manager = None
# Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
self._record_infos = False
# Set in `.download_and_prepare` once the format of the generated dataset is known
self._file_format = None
# Enable streaming (e.g. it patches "open" to work with remote files)
extend_dataset_builder_for_streaming(self)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__ = d
# Re-enable streaming, since patched functions are not kept when pickling
extend_dataset_builder_for_streaming(self) | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
# Must be set for datasets that use 'data_dir' functionality - the ones
# that require users to do additional steps to download the data
# (this is usually due to some external regulations / rules).
# This field should contain a string with user instructions, including
# the list of files that should be present. It will be
# displayed in the dataset documentation.
@property
def manual_download_instructions(self) -> Optional[str]:
return None
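A hedged sketch of overriding this property in a custom builder (the class name, URL, and file names are hypothetical):

```py
import datasets

class MyRestrictedDataset(datasets.GeneratorBasedBuilder):
    @property
    def manual_download_instructions(self):
        return (
            "Please download archive.zip from https://example.com/data (registration "
            "required) and pass the folder containing it via `data_dir=...`."
        )
```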
def _check_legacy_cache(self) -> Optional[str]:
"""Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
if (
self.__module__.startswith("datasets.")
and not is_remote_url(self._cache_dir_root)
and self.config.name == "default"
):
from .packaged_modules import _PACKAGED_DATASETS_MODULES | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
config_id = config_name + self.config_id[len(self.config.name) :]
hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
legacy_relative_data_dir = posixpath.join(
self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
config_id,
"0.0.0",
hash,
)
legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
if os.path.isdir(legacy_cache_dir):
return legacy_relative_data_dir | 54 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/builder.py |
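A small sketch of the legacy layout this method probes, with hypothetical values:

```py
import posixpath

namespace, dataset_name, config_id, hash_ = "lhoestq", "squad", "default", "abc123"
legacy_relative_data_dir = posixpath.join(
    f"{namespace}___{dataset_name}" if namespace else dataset_name,
    config_id,
    "0.0.0",  # legacy dirs were pinned to version 0.0.0
    hash_,
)
print(legacy_relative_data_dir)  # lhoestq___squad/default/0.0.0/abc123
```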