Dataset columns: text (string, lengths 1 to 1.02k), class_index (int64, 0 to 271), source (string, 76 distinct values)
dtype: str
id: Optional[str] = None
# Automatically constructed
pa_type: ClassVar[Any] = None
_type: str = field(default="Value", init=False, repr=False)

def __post_init__(self):
    if self.dtype == "double":  # fix inferred type
        self.dtype = "float64"
    if self.dtype == "float":  # fix inferred type
        self.dtype = "float32"
    self.pa_type = string_to_arrow(self.dtype)

def __call__(self):
    return self.pa_type

def encode_example(self, value):
    if pa.types.is_boolean(self.pa_type):
        return bool(value)
    elif pa.types.is_integer(self.pa_type):
        return int(value)
    elif pa.types.is_floating(self.pa_type):
        return float(value)
    elif pa.types.is_string(self.pa_type):
        return str(value)
    else:
        return value
141
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
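For context, a minimal usage sketch (assuming the `datasets` library is importable; not part of the source above) of the dtype normalization and scalar coercion that `Value.__post_init__` and `Value.encode_example` perform:

```py
from datasets import Value

# "double" is an alias that __post_init__ rewrites to "float64"
v = Value("double")
print(v.dtype)    # float64
print(v())        # the underlying pyarrow type (float64)

# encode_example coerces plain Python scalars to the declared Arrow type family
print(Value("int32").encode_example("7"))     # 7
print(Value("string").encode_example(3.14))   # '3.14'
```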
class _ArrayXD:
    def __post_init__(self):
        self.shape = tuple(self.shape)

    def __call__(self):
        pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
        return pa_type

    def encode_example(self, value):
        return value
142
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array2D(_ArrayXD): """Create a two-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array2D", init=False, repr=False)
143
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array3D(_ArrayXD): """Create a three-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array3D", init=False, repr=False)
144
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array4D(_ArrayXD): """Create a four-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array4D", init=False, repr=False)
145
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array5D(_ArrayXD): """Create a five-dimensional array. Args: shape (`tuple`): Size of each dimension. dtype (`str`): Name of the data type. Example: ```py >>> from datasets import Features >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')}) ``` """ shape: tuple dtype: str id: Optional[str] = None # Automatically constructed _type: str = field(default="Array5D", init=False, repr=False)
146
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
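A hedged usage sketch for the `ArrayXD` features above (the column name `matrix` is illustrative): attaching an `Array2D` feature to a small in-memory dataset of NumPy arrays.

```py
import numpy as np
from datasets import Array2D, Dataset, Features

features = Features({"matrix": Array2D(shape=(2, 3), dtype="int32")})
ds = Dataset.from_dict(
    {"matrix": [np.arange(6, dtype="int32").reshape(2, 3) for _ in range(4)]},
    features=features,
)
print(ds.features["matrix"])              # Array2D(shape=(2, 3), dtype='int32', id=None)
print(np.asarray(ds[0]["matrix"]).shape)  # (2, 3)
```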
class _ArrayXDExtensionType(pa.ExtensionType): ndims: Optional[int] = None def __init__(self, shape: tuple, dtype: str): if self.ndims is None or self.ndims <= 1: raise ValueError("You must instantiate an array type with a value for dim that is > 1") if len(shape) != self.ndims: raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") for dim in range(1, self.ndims): if shape[dim] is None: raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}") self.shape = tuple(shape) self.value_type = dtype self.storage_dtype = self._generate_dtype(self.value_type) pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}") def __arrow_ext_serialize__(self): return json.dumps((self.shape, self.value_type)).encode()
147
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
@classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): args = json.loads(serialized) return cls(*args) # This was added to pa.ExtensionType in pyarrow >= 13.0.0 def __reduce__(self): return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__()) def __hash__(self): return hash((self.__class__, self.shape, self.value_type)) def __arrow_ext_class__(self): return ArrayExtensionArray def _generate_dtype(self, dtype): dtype = string_to_arrow(dtype) for d in reversed(self.shape): dtype = pa.list_(dtype) # Don't specify the size of the list, since fixed length list arrays have issues # being validated after slicing in pyarrow 0.17.1 return dtype def to_pandas_dtype(self): return PandasArrayExtensionDtype(self.value_type)
147
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array2DExtensionType(_ArrayXDExtensionType): ndims = 2
148
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array3DExtensionType(_ArrayXDExtensionType): ndims = 3
149
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array4DExtensionType(_ArrayXDExtensionType): ndims = 4
150
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Array5DExtensionType(_ArrayXDExtensionType): ndims = 5
151
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
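A short sketch (output comments are indicative, not verbatim) of how calling an `ArrayXD` feature resolves to the matching extension type defined above, whose storage is a plain nested list type:

```py
from datasets import Array2D
from datasets.features.features import Array2DExtensionType

pa_type = Array2D(shape=(2, 3), dtype="int32")()    # _ArrayXD.__call__ looks up Array2DExtensionType
print(isinstance(pa_type, Array2DExtensionType))    # True
print(pa_type.shape, pa_type.value_type)            # (2, 3) int32
print(pa_type.storage_dtype)                        # nested list<list<int32>> storage
```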
class ArrayExtensionArray(pa.ExtensionArray): def __array__(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) return self.to_numpy(zero_copy_only=zero_copy_only) def __getitem__(self, i): return self.storage[i] def to_numpy(self, zero_copy_only=True): storage: pa.ListArray = self.storage null_mask = storage.is_null().to_numpy(zero_copy_only=False) if self.type.shape[0] is not None: size = 1 null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) for i in range(self.type.ndims): size *= self.type.shape[i] storage = storage.flatten() numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) if len(null_indices): numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)
152
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
else: shape = self.type.shape ndims = self.type.ndims arrays = [] first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) for i, is_null in enumerate(null_mask): if is_null: arrays.append(np.nan) else: storage_el = storage[i : i + 1] first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] # flatten storage for _ in range(ndims): storage_el = storage_el.flatten() numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) if len(np.unique(np.diff(first_dim_offsets))) > 1: # ragged numpy_arr = np.empty(len(arrays), dtype=object) numpy_arr[:] = arrays else: numpy_arr = np.array(arrays)
152
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
return numpy_arr def to_pylist(self): zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) if self.type.shape[0] is None and numpy_arr.dtype == object: return [arr.tolist() for arr in numpy_arr.tolist()] else: return numpy_arr.tolist()
152
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
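A minimal sketch of the numpy conversion path implemented by `ArrayExtensionArray.to_numpy` (column name `m` is illustrative); with a fixed first dimension the column comes back as one stacked ndarray:

```py
import numpy as np
from datasets import Array2D, Dataset, Features

features = Features({"m": Array2D(shape=(2, 2), dtype="int32")})
ds = Dataset.from_dict({"m": [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]}, features=features)

mat = ds.with_format("numpy")[:]["m"]   # goes through ArrayExtensionArray.to_numpy
print(mat.shape, mat.dtype)             # (2, 2, 2) int32
```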
class PandasArrayExtensionDtype(PandasExtensionDtype): _metadata = "value_type" def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]): self._value_type = value_type def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): if isinstance(array, pa.ChunkedArray): array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) return PandasArrayExtensionArray(numpy_arr) @classmethod def construct_array_type(cls): return PandasArrayExtensionArray @property def type(self) -> type: return np.ndarray @property def kind(self) -> str: return "O" @property def name(self) -> str: return f"array[{self.value_type}]" @property def value_type(self) -> np.dtype: return self._value_type
153
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class PandasArrayExtensionArray(PandasExtensionArray): def __init__(self, data: np.ndarray, copy: bool = False): self._data = data if not copy else np.array(data) self._dtype = PandasArrayExtensionDtype(data.dtype) def __array__(self, dtype=None): """ Convert to NumPy Array. Note that Pandas expects a 1D array when dtype is set to object. But for other dtypes, the returned shape is the same as the one of ``data``. More info about pandas 1D requirement for PandasExtensionArray here: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
""" if dtype == np.dtype(object): out = np.empty(len(self._data), dtype=object) for i in range(len(self._data)): out[i] = self._data[i] return out if dtype is None: return self._data else: return self._data.astype(dtype) def copy(self, deep: bool = False) -> "PandasArrayExtensionArray": return PandasArrayExtensionArray(self._data, copy=True)
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
@classmethod def _from_sequence( cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False ) -> "PandasArrayExtensionArray": if len(scalars) > 1 and all( isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars ): data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) else: data = np.empty(len(scalars), dtype=object) data[:] = scalars return cls(data, copy=copy)
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
@classmethod def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray": if len(to_concat) > 1 and all( va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat ): data = np.vstack([va._data for va in to_concat]) else: data = np.empty(len(to_concat), dtype=object) data[:] = [va._data for va in to_concat] return cls(data, copy=False) @property def dtype(self) -> PandasArrayExtensionDtype: return self._dtype @property def nbytes(self) -> int: return self._data.nbytes def isna(self) -> np.ndarray: return np.array([pd.isna(arr).any() for arr in self._data]) def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: raise NotImplementedError()
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
    if isinstance(item, int):
        return self._data[item]
    return PandasArrayExtensionArray(self._data[item], copy=False)
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def take( self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None ) -> "PandasArrayExtensionArray": indices: np.ndarray = np.asarray(indices, dtype=int) if allow_fill: fill_value = ( self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) ) mask = indices == -1 if (indices < -1).any(): raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True") elif len(self) > 0: pass elif not np.all(mask): raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.") else: data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) return PandasArrayExtensionArray(data, copy=False) took = self._data.take(indices, axis=0) if allow_fill and mask.any():
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
    took[mask] = [fill_value] * np.sum(mask)
return PandasArrayExtensionArray(took, copy=False)
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def __len__(self) -> int:
    return len(self._data)

def __eq__(self, other) -> np.ndarray:
    if not isinstance(other, PandasArrayExtensionArray):
        raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
    return (self._data == other._data).all()
154
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class ClassLabel: """Feature type for integer class labels. There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments: * `num_classes`: Create 0 to (num_classes-1) labels. * `names`: List of label strings. * `names_file`: File containing the list of labels. Under the hood the labels are stored as integers. You can use negative integers to represent unknown/missing labels. Args: num_classes (`int`, *optional*): Number of classes. All labels must be < `num_classes`. names (`list` of `str`, *optional*): String names for the integer classes. The order in which the names are provided is kept. names_file (`str`, *optional*): Path to a file with names for the integer classes, one per line. Example:
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
```py >>> from datasets import Features, ClassLabel >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])}) >>> features {'label': ClassLabel(names=['bad', 'ok', 'good'], id=None)} ``` """ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict names: List[str] = None names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "int64" pa_type: ClassVar[Any] = pa.int64() _str2int: ClassVar[Dict[str, int]] = None _int2str: ClassVar[Dict[int, int]] = None _type: str = field(default="ClassLabel", init=False, repr=False)
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def __post_init__(self, num_classes, names_file): self.num_classes = num_classes self.names_file = names_file if self.names_file is not None and self.names is not None: raise ValueError("Please provide either names or names_file but not both.") # Set self.names if self.names is None: if self.names_file is not None: self.names = self._load_names_from_file(self.names_file) elif self.num_classes is not None: self.names = [str(i) for i in range(self.num_classes)] else: raise ValueError("Please provide either num_classes, names or names_file.") elif not isinstance(self.names, SequenceABC): raise TypeError(f"Please provide names as a list, is {type(self.names)}") # Set self.num_classes if self.num_classes is None: self.num_classes = len(self.names) elif self.num_classes != len(self.names): raise ValueError(
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
"ClassLabel number of names do not match the defined num_classes. " f"Got {len(self.names)} names VS {self.num_classes} num_classes" ) # Prepare mappings self._int2str = [str(name) for name in self.names] self._str2int = {name: i for i, name in enumerate(self._int2str)} if len(self._int2str) != len(self._str2int): raise ValueError("Some label names are duplicated. Each label name should be unique.")
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def __call__(self): return self.pa_type def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: """Conversion class name `string` => `integer`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].str2int('neg') 0 ``` """ if not isinstance(values, str) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, str): values = [values] return_list = False output = [self._strval2int(value) for value in values] return output if return_list else output[0]
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def _strval2int(self, value: str) -> int: failed_parse = False value = str(value) # first attempt - raw string value int_value = self._str2int.get(value) if int_value is None: # second attempt - strip whitespace int_value = self._str2int.get(value.strip()) if int_value is None: # third attempt - convert str to int try: int_value = int(value) except ValueError: failed_parse = True else: if int_value < -1 or int_value >= self.num_classes: failed_parse = True if failed_parse: raise ValueError(f"Invalid string class label {value}") return int_value def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: """Conversion `integer` => class name `string`.
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Regarding unknown/missing labels: passing negative integers raises `ValueError`. Example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> ds.features["label"].int2str(0) 'neg' ``` """ if not isinstance(values, int) and not isinstance(values, Iterable): raise ValueError( f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)" ) return_list = True if isinstance(values, int): values = [values] return_list = False for v in values: if not 0 <= v < self.num_classes: raise ValueError(f"Invalid integer class label {v:d}") output = [self._int2str[int(v)] for v in values] return output if return_list else output[0]
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
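A quick round-trip sketch for the `str2int` / `int2str` mappings defined above (label names are illustrative):

```py
from datasets import ClassLabel

label = ClassLabel(names=["neg", "pos"])
print(label.str2int("pos"))            # 1
print(label.str2int(["neg", "pos"]))   # [0, 1]
print(label.int2str(0))                # 'neg'
```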
def encode_example(self, example_data): if self.num_classes is None: raise ValueError( "Trying to use ClassLabel feature with undefined number of class. " "Please set ClassLabel.names or num_classes." ) # If a string is given, convert to associated integer if isinstance(example_data, str): example_data = self.str2int(example_data) # Allowing -1 to mean no label. if not -1 <= example_data < self.num_classes: raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}") return example_data def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: """Cast an Arrow array to the `ClassLabel` arrow storage type. The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are: - `pa.string()` - `pa.int()`
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Args: storage (`Union[pa.StringArray, pa.IntegerArray]`): PyArrow array to cast. Returns: `pa.Int64Array`: Array in the `ClassLabel` arrow storage type. """ if isinstance(storage, pa.IntegerArray) and len(storage) > 0: min_max = pc.min_max(storage).as_py() if min_max["max"] is not None and min_max["max"] >= self.num_classes: raise ValueError( f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}" ) elif isinstance(storage, pa.StringArray): storage = pa.array( [self._strval2int(label) if label is not None else None for label in storage.to_pylist()] ) return array_cast(storage, self.pa_type)
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
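A sketch of `ClassLabel.cast_storage` on a plain pyarrow string column (a likely shape for raw label data); string labels are mapped through `_strval2int` and nulls are preserved:

```py
import pyarrow as pa
from datasets import ClassLabel

label = ClassLabel(names=["neg", "pos"])
storage = pa.array(["pos", "neg", None])
print(label.cast_storage(storage))   # int64 array: [1, 0, null]
```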
@staticmethod
def _load_names_from_file(names_filepath):
    with open(names_filepath, encoding="utf-8") as f:
        return [name.strip() for name in f.read().split("\n") if name.strip()]  # Filter empty names
155
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Sequence:
    """Construct a list of features from a single type or a dict of types.
    Mostly here for compatibility with tfds.

    Args:
        feature ([`FeatureType`]):
            A list of features of a single type or a dictionary of types.
        length (`int`):
            Length of the sequence.

    Example:

    ```py
    >>> from datasets import Features, Sequence, Value, ClassLabel
    >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
    >>> features
    {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(names=['hot', 'cold'], id=None)}, length=-1, id=None)}
    ```
    """
156
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
feature: Any
length: int = -1
id: Optional[str] = None  # Automatically constructed
dtype: ClassVar[str] = "list"
pa_type: ClassVar[Any] = None
_type: str = field(default="Sequence", init=False, repr=False)
156
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
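A hedged example of the `Sequence` feature in use (column names are illustrative), including the dict-of-lists layout mentioned in the docstring above:

```py
from datasets import Dataset, Features, Sequence, Value

features = Features({
    "tokens": Sequence(Value("string")),
    "pos": Sequence({"tag": Value("string")}),
})
ds = Dataset.from_dict(
    {"tokens": [["hello", "world"]], "pos": [{"tag": ["UH", "NN"]}]},
    features=features,
)
print(ds[0]["pos"])   # {'tag': ['UH', 'NN']} -- a Sequence of dicts is stored as a dict of lists
```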
class LargeList: """Feature type for large list data composed of child feature data type. It is backed by `pyarrow.LargeListType`, which is like `pyarrow.ListType` but with 64-bit rather than 32-bit offsets. Args: feature ([`FeatureType`]): Child feature data type of each item within the large list. """ feature: Any id: Optional[str] = None # Automatically constructed pa_type: ClassVar[Any] = None _type: str = field(default="LargeList", init=False, repr=False)
157
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
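A brief sketch of `LargeList` (assuming a `datasets` version recent enough to export it from the top-level package); its Arrow type uses 64-bit offsets:

```py
from datasets import Features, LargeList, Value

features = Features({"tokens": LargeList(Value("string"))})
print(features.arrow_schema.field("tokens").type)   # large_list<item: string>
```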
class Features(dict): """A special dictionary that defines the internal structure of a dataset. Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names, and values are the type of that column. `FieldType` can be one of the following: - [`Value`] feature specifies a single data type value, e.g. `int64` or `string`. - [`ClassLabel`] feature specifies a predefined set of classes which can have labels associated to them and will be stored as integers in the dataset. - Python `dict` specifies a composite feature containing a mapping of sub-fields to sub-features. It's possible to have nested fields of nested fields in an arbitrary manner. - Python `list`, [`LargeList`] or [`Sequence`] specifies a composite feature containing a sequence of sub-features, all of the same feature type. <Tip>
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
A [`Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of lists. This behavior is implemented to provide a compatibility layer with the TensorFlow Datasets library, but it may be unwanted in some cases. If you don't want this behavior, you can use a Python `list` or a [`LargeList`] instead of the [`Sequence`]. </Tip>
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
- [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays. - [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data. - [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data. - [`Translation`] or [`TranslationVariableLanguages`] feature specific to Machine Translation. """
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def __init__(*args, **kwargs): # self not in the signature to allow passing self as a kwarg if not args: raise TypeError("descriptor '__init__' of 'Features' object needs an argument") self, *args = args super(Features, self).__init__(*args, **kwargs) self._column_requires_decoding: Dict[str, bool] = { col: require_decoding(feature) for col, feature in self.items() } __setitem__ = keep_features_dicts_synced(dict.__setitem__) __delitem__ = keep_features_dicts_synced(dict.__delitem__) update = keep_features_dicts_synced(dict.update) setdefault = keep_features_dicts_synced(dict.setdefault) pop = keep_features_dicts_synced(dict.pop) popitem = keep_features_dicts_synced(dict.popitem) clear = keep_features_dicts_synced(dict.clear) def __reduce__(self): return Features, (dict(self),) @property def type(self): """ Features field types.
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Returns: :obj:`pyarrow.DataType` """ return get_nested_type(self) @property def arrow_schema(self): """ Features schema. Returns: :obj:`pyarrow.Schema` """ hf_metadata = {"info": {"features": self.to_dict()}} return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)}) @classmethod def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features": """ Construct [`Features`] from Arrow Schema. It also checks the schema metadata for Hugging Face Datasets features. Non-nullable fields are not supported and set to nullable. Also, pa.dictionary is not supported and it uses its underlying type instead. Therefore datasets convert DictionaryArray objects to their actual values. Args: pa_schema (`pyarrow.Schema`): Arrow Schema.
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Returns: [`Features`] """ # try to load features from the arrow schema metadata metadata_features = Features() if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata: metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode()) if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None: metadata_features = Features.from_dict(metadata["info"]["features"]) metadata_features_schema = metadata_features.arrow_schema obj = { field.name: ( metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) ) for field in pa_schema } return cls(**obj)
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
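A minimal sketch of `Features.from_arrow_schema` on a schema without Hugging Face metadata, where each field is inferred with `generate_from_arrow_type`:

```py
import pyarrow as pa
from datasets import Features

schema = pa.schema({"text": pa.string(), "score": pa.float32()})
print(Features.from_arrow_schema(schema))
# {'text': Value(dtype='string', id=None), 'score': Value(dtype='float32', id=None)}
```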
@classmethod def from_dict(cls, dic) -> "Features": """ Construct [`Features`] from dict. Regenerate the nested feature object from a deserialized dict. We use the `_type` key to infer the dataclass name of the feature `FieldType`. It allows for a convenient constructor syntax to define features from deserialized JSON dictionaries. This function is used in particular when deserializing a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes that [`Value`] automatically performs. Args: dic (`dict[str, Any]`): Python dictionary. Returns: `Features`
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Example:: >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}}) {'_type': Value(dtype='string', id=None)} """ obj = generate_from_dict(dic) return cls(**obj) def to_dict(self): return asdict(self) def _to_yaml_list(self) -> list: # we compute the YAML list from the dict representation that is used for JSON dump yaml_data = self.to_dict() def simplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
for list_type in ["large_list", "list", "sequence"]: # # list_type: -> list_type: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["dtype"]: feature[list_type] = feature[list_type]["dtype"] # # list_type: -> list_type: # struct: -> - name: foo # - name: foo -> dtype: int32 # dtype: int32 -> # if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ["struct"]: feature[list_type] = feature[list_type]["struct"]
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
# # class_label: -> class_label: # names: -> names: # - negative -> '0': negative # - positive -> '1': positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list): # server-side requirement: keys must be strings feature["class_label"]["names"] = { str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"]) } return feature
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def to_yaml_inner(obj: Union[dict, list]) -> dict: if isinstance(obj, dict): _type = obj.pop("_type", None) if _type == "LargeList": _feature = obj.pop("feature") return simplify({"large_list": to_yaml_inner(_feature), **obj}) elif _type == "Sequence": _feature = obj.pop("feature") return simplify({"sequence": to_yaml_inner(_feature), **obj}) elif _type == "Value": return obj elif _type and not obj: return {"dtype": camelcase_to_snakecase(_type)} elif _type: return {"dtype": simplify({camelcase_to_snakecase(_type): obj})} else: return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]} elif isinstance(obj, list):
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
return simplify({"list": simplify(to_yaml_inner(obj[0]))}) elif isinstance(obj, tuple): return to_yaml_inner(list(obj)) else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def to_yaml_types(obj: dict) -> dict: if isinstance(obj, dict): return {k: to_yaml_types(v) for k, v in obj.items()} elif isinstance(obj, list): return [to_yaml_types(v) for v in obj] elif isinstance(obj, tuple): return to_yaml_types(list(obj)) else: return obj return to_yaml_types(to_yaml_inner(yaml_data)["struct"]) @classmethod def _from_yaml_list(cls, yaml_data: list) -> "Features": yaml_data = copy.deepcopy(yaml_data) # we convert the list obtained from YAML data into the dict representation that is used for JSON dump def unsimplify(feature: dict) -> dict: if not isinstance(feature, dict): raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
for list_type in ["large_list", "list", "sequence"]: # # list_type: int32 -> list_type: # -> dtype: int32 # if isinstance(feature.get(list_type), str): feature[list_type] = {"dtype": feature[list_type]}
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
# # class_label: -> class_label: # names: -> names: # '0': negative -> - negative # '1': positive -> - positive # if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict): label_ids = sorted(feature["class_label"]["names"], key=int) if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): raise ValueError( f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing." ) feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids] return feature
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: if isinstance(obj, dict): if not obj: return {} _type = next(iter(obj)) if _type == "large_list": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "LargeList"} if _type == "sequence": _feature = unsimplify(obj).pop(_type) return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"} if _type == "list": return [from_yaml_inner(unsimplify(obj)[_type])] if _type == "struct": return from_yaml_inner(obj["struct"]) elif _type == "dtype": if isinstance(obj["dtype"], str): # e.g. int32, float64, string, audio, image try:
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Value(obj["dtype"]) return {**obj, "_type": "Value"} except ValueError: # e.g. Audio, Image, ArrayXD return {"_type": snakecase_to_camelcase(obj["dtype"])} else: return from_yaml_inner(obj["dtype"]) else: return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} elif isinstance(obj, list): names = [_feature.pop("name") for _feature in obj] return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)} else: raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
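A hedged round-trip sketch for the two private YAML helpers above (`_to_yaml_list` / `_from_yaml_list` are internal and may change); this is the `features` layout used in dataset card `dataset_info` sections:

```py
from datasets import ClassLabel, Features, Sequence, Value

features = Features({
    "tokens": Sequence(Value("string")),
    "label": ClassLabel(names=["neg", "pos"]),
})
yaml_list = features._to_yaml_list()
print(yaml_list)
# [{'name': 'tokens', 'sequence': 'string'},
#  {'name': 'label', 'dtype': {'class_label': {'names': {'0': 'neg', '1': 'pos'}}}}]
assert Features._from_yaml_list(yaml_list) == features
```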
return cls.from_dict(from_yaml_inner(yaml_data)) def encode_example(self, example): """ Encode example into a format for Arrow. Args: example (`dict[str, Any]`): Data in a Dataset row. Returns: `dict[str, Any]` """ example = cast_to_python_objects(example) return encode_nested_example(self, example) def encode_column(self, column, column_name: str): """ Encode column into a format for Arrow. Args: column (`list[Any]`): Data in a Dataset column. column_name (`str`): Dataset column name. Returns: `list[Any]` """ column = cast_to_python_objects(column) return [encode_nested_example(self[column_name], obj, level=1) for obj in column] def encode_batch(self, batch): """ Encode batch into a format for Arrow.
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
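A short sketch of `encode_example` and `encode_batch` (column names are illustrative); string class labels are converted to their integer ids:

```py
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
print(features.encode_example({"text": "great movie", "label": "pos"}))
# {'text': 'great movie', 'label': 1}
print(features.encode_batch({"text": ["bad", "good"], "label": ["neg", "pos"]}))
# {'text': ['bad', 'good'], 'label': [0, 1]}
```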
Args: batch (`dict[str, list[Any]]`): Data in a Dataset batch. Returns: `dict[str, list[Any]]` """ encoded_batch = {} if set(batch) != set(self): raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}") for key, column in batch.items(): column = cast_to_python_objects(column) encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column] return encoded_batch def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode example with custom feature decoding.
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Args: example (`dict[str, Any]`): Dataset row data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary `repo_id (str) -> token (bool or str)`. Returns: `dict[str, Any]` """ return { column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for column_name, (feature, value) in zip_dict( {key: value for key, value in self.items() if key in example}, example ) } def decode_column(self, column: list, column_name: str): """Decode column with custom feature decoding. Args: column (`list[Any]`): Dataset column data. column_name (`str`): Dataset column name.
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Returns: `list[Any]` """ return ( [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column ) def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None): """Decode batch with custom feature decoding. Args: batch (`dict[str, list[Any]]`): Dataset batch data. token_per_repo_id (`dict`, *optional*): To access and decode audio or image files from private repositories on the Hub, you can pass a dictionary repo_id (str) -> token (bool or str)
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
Returns: `dict[str, list[Any]]` """ decoded_batch = {} for column_name, column in batch.items(): decoded_batch[column_name] = ( [ decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column ] if self._column_requires_decoding[column_name] else column ) return decoded_batch def copy(self) -> "Features": """ Make a deep copy of [`Features`]. Returns: [`Features`] Example:
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="train") >>> copy_of_features = ds.features.copy() >>> copy_of_features {'label': ClassLabel(names=['neg', 'pos'], id=None), 'text': Value(dtype='string', id=None)} ``` """ return copy.deepcopy(self) def reorder_fields_as(self, other: "Features") -> "Features": """ Reorder Features fields to match the field order of other [`Features`]. The order of the fields is important since it matters for the underlying arrow data. Re-ordering the fields allows to make the underlying arrow data type match. Args: other ([`Features`]): The other [`Features`] to align with. Returns: [`Features`] Example::
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
>>> from datasets import Features, Sequence, Value >>> # let's say we have two features with a different order of nested fields (for a and b for example) >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})}) >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}}) >>> assert f1.type != f2.type >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but makes the fields order match >>> f1.reorder_fields_as(f2) {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)} >>> assert f1.reorder_fields_as(f2).type == f2.type """
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
def recursive_reorder(source, target, stack=""): stack_position = " at " + stack[1:] if stack else "" if isinstance(target, Sequence): target = target.feature if isinstance(target, dict): target = {k: [v] for k, v in target.items()} else: target = [target] if isinstance(source, Sequence): sequence_kwargs = vars(source).copy() source = sequence_kwargs.pop("feature") if isinstance(source, dict): source = {k: [v] for k, v in source.items()} reordered = recursive_reorder(source, target, stack) return Sequence({k: v[0] for k, v in reordered.items()}, **sequence_kwargs) else: source = [source] reordered = recursive_reorder(source, target, stack) return Sequence(reordered[0], **sequence_kwargs)
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
elif isinstance(source, dict): if not isinstance(target, dict): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if sorted(source) != sorted(target): message = ( f"Keys mismatch: between {source} (source) and {target} (target).\n" f"{source.keys() - target.keys()} are missing from target " f"and {target.keys() - source.keys()} are missing from source" + stack_position ) raise ValueError(message) return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target} elif isinstance(source, list): if not isinstance(target, list): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) if len(source) != len(target):
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position) return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))] elif isinstance(source, LargeList): if not isinstance(target, LargeList): raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position) return LargeList(recursive_reorder(source.feature, target.feature, stack)) else: return source
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
return Features(recursive_reorder(self, other)) def flatten(self, max_depth=16) -> "Features": """Flatten the features. Every dictionary column is removed and is replaced by all the subfields it contains. The new fields are named by concatenating the name of the original column and the subfield name like this: `<original>.<subfield>`. If a column contains nested dictionaries, then all the lower-level subfields names are also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc. Returns: [`Features`]: The flattened features. Example:
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("squad", split="train") >>> ds.features.flatten() {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None), 'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` """ for depth in range(1, max_depth): no_change = True flattened = self.copy() for column_name, subfeature in self.items(): if isinstance(subfeature, dict): no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()}) del flattened[column_name]
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): no_change = False flattened.update( { f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v] for k, v in subfeature.feature.items() } ) del flattened[column_name] elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature: no_change = False flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()}) del flattened[column_name] self = flattened if no_change: break return self
158
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/features.py
class Translation:
    """`Feature` for translations with fixed languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to string translations.

    Example:

    ```python
    >>> # At construction time:
    >>> datasets.features.Translation(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': 'le chat',
    ...         'de': 'die katze'
    ... }
    ```
    """

    languages: List[str]
    id: Optional[str] = None  # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
159
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
    """Flatten the Translation feature into a dictionary."""
    from .features import Value

    return {k: Value("string") for k in sorted(self.languages)}
159
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
class TranslationVariableLanguages:
    """`Feature` for translations with variable languages per example.
    Here for compatibility with tfds.

    Args:
        languages (`dict`):
            A dictionary for each example mapping string language codes to one or more string translations.
            The languages present may vary from example to example.

    Returns:
        - `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
            Language codes sorted in ascending order or plain text translations, sorted to align with language codes.

    Example:
160
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
    ```python
    >>> # At construction time:
    >>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
    >>> # During data generation:
    >>> yield {
    ...         'en': 'the cat',
    ...         'fr': ['le chat', 'la chatte'],
    ...         'de': 'die katze'
    ... }
    >>> # Tensor returned:
    >>> {
    ...         'language': ['en', 'de', 'fr', 'fr'],
    ...         'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
    ... }
    ```
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None  # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
160
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
def __call__(self): return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def encode_example(self, translation_dict): lang_set = set(self.languages) if set(translation_dict) == {"language", "translation"}: return translation_dict elif self.languages and set(translation_dict) - lang_set: raise ValueError( f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})." ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. translation_tuples = [] for lang, text in translation_dict.items(): if isinstance(text, str): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text])
160
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
# Ensure translations are in ascending order by language code. languages, translations = zip(*sorted(translation_tuples)) return {"language": languages, "translation": translations} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """Flatten the TranslationVariableLanguages feature into a dictionary.""" from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), }
160
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/translation.py
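A small sketch of `TranslationVariableLanguages.encode_example` (language codes are illustrative); multiple translations for one language are split into aligned `language` / `translation` tuples, sorted by language code:

```py
from datasets.features import TranslationVariableLanguages

feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}
```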
class Audio: """Audio [`Feature`] to extract audio data from an audio file. Input: The Audio feature accepts as input: - A `str`: Absolute path to the audio file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `bytes`: Bytes content of the audio file. This is useful for archived files with sequential access. - A `dict` with the keys: - `path`: String with relative path of the audio file to the archive file. - `array`: Array containing the audio sample - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample. This is useful for archived files with sequential access.
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
Args: sampling_rate (`int`, *optional*): Target sampling rate. If `None`, the native sampling rate is used. mono (`bool`, defaults to `True`): Whether to convert the audio signal to mono by averaging samples across channels. decode (`bool`, defaults to `True`): Whether to decode the audio data. If `False`, returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`. Example:
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
```py >>> from datasets import load_dataset, Audio >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train") >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000)) >>> ds[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` """ sampling_rate: Optional[int] = None mono: bool = True decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "dict" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Audio", init=False, repr=False) def __call__(self): return self.pa_type
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
def encode_example(self, value: Union[str, bytes, dict]) -> dict: """Encode example into a format for Arrow. Args: value (`str` or `dict`): Data passed as input to Audio feature.
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
Returns: `dict` """ try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'.") from err if isinstance(value, str): return {"bytes": None, "path": value} elif isinstance(value, bytes): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes buffer = BytesIO() sf.write(buffer, value["array"], value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm"): # "PCM" only has raw audio bytes
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
if value.get("sampling_rate") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") if value.get("bytes"): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767 else: bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
buffer = BytesIO(bytes()) sf.write(buffer, bytes_value, value["sampling_rate"], format="wav") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example( self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict: """Decode example audio file into audio data. Args: value (`dict`): A dictionary with keys:
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
- `path`: String with relative audio file path. - `bytes`: Bytes of the audio file. token_per_repo_id (`dict`, *optional*): To access and decode audio files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`) Returns: `dict` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.") path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err audio_format = xsplitext(path)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
if file is None: token_per_repo_id = token_per_repo_id or {} source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id[repo_id] except (ValueError, KeyError): token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: array, sampling_rate = sf.read(f) else: array, sampling_rate = sf.read(file)
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
array = array.T if self.mono: array = librosa.to_mono(array) if self.sampling_rate and self.sampling_rate != sampling_rate: array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) sampling_rate = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.""" from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature.") return { "bytes": Value("binary"), "path": Value("string"), }
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: """Cast an Arrow array to the Audio arrow storage type. The Arrow types that can be converted to the Audio pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the audio bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter Args: storage (`Union[pa.StringArray, pa.StructArray]`): PyArrow array to cast.
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})` """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"): storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes")
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) return array_cast(storage, self.pa_type)
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
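A minimal sketch of `Audio.cast_storage` on a plain string column of file paths (file names are hypothetical); the result always uses the `{bytes, path}` struct storage:

```py
import pyarrow as pa
from datasets import Audio

audio = Audio(decode=False)
storage = pa.array(["recording_1.wav", None])       # paths only, no bytes
casted = audio.cast_storage(storage)
print(casted.type)         # struct<bytes: binary, path: string>
print(casted.to_pylist())  # [{'bytes': None, 'path': 'recording_1.wav'}, None]
```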
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed audio files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Audio arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ @no_op_if_value_is_null def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
161
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/audio.py
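A minimal sketch of `embed_storage`, assuming a local placeholder file `"sample.wav"` exists. It starts from path-only storage produced by `cast_storage`, then embedding reads each file into the "bytes" field and reduces "path" to the basename:

```py
import pyarrow as pa
from datasets import Audio

audio = Audio()

# Path-only storage -> embed the file contents into the Arrow array.
storage = audio.cast_storage(pa.array(["sample.wav"], type=pa.string()))
embedded = audio.embed_storage(storage)
print(embedded.field("bytes")[0].as_py()[:4])  # first bytes of the file (e.g. b"RIFF" for WAV)
print(embedded.field("path")[0].as_py())       # "sample.wav" (basename only)
```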
class Video:
    """
    **Experimental.** Video [`Feature`] to read video data from a video file.

    Input: The Video feature accepts as input:
    - A `str`: Absolute path to the video file (i.e. random access is allowed).
    - A `dict` with the keys:

        - `path`: String with relative path of the video file in a dataset repository.
        - `bytes`: Bytes of the video file.

      This is useful for archived files with sequential access.

    - A `decord.VideoReader`: decord video reader object.

    Args:
        mode (`str`, *optional*):
            The mode to convert the video to. If `None`, the native mode of the video is used.
        decode (`bool`, defaults to `True`):
            Whether to decode the video data. If `False`,
            returns the underlying dictionary in the format `{"path": video_path, "bytes": video_bytes}`.

    Examples:
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
    ```py
    >>> from datasets import Dataset, Video
    >>> ds = Dataset.from_dict({"video": ["path/to/Screen Recording.mov"]}).cast_column("video", Video())
    >>> ds.features["video"]
    Video(decode=True, id=None)
    >>> ds[0]["video"]
    <decord.video_reader.VideoReader at 0x105525c70>
    >>> ds = ds.cast_column('video', Video(decode=False))
    >>> ds[0]["video"]
    {'bytes': None, 'path': 'path/to/Screen Recording.mov'}
    ```
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "decord.VideoReader"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Video", init=False, repr=False)

    def __post_init__(self):
        if config.DECORD_AVAILABLE:
            patch_decord()

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "VideoReader"]) -> dict:
        """Encode example into a format for Arrow.
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
        Args:
            value (`str`, `np.ndarray`, `VideoReader` or `dict`):
                Data passed as input to Video feature.

        Returns:
            `dict` with "path" and "bytes" fields
        """
        if config.DECORD_AVAILABLE:
            from decord import VideoReader
        else:
            VideoReader = None

        if isinstance(value, list):
            value = np.array(value)
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the video array to bytes
            return encode_np_array(value)
        elif VideoReader and isinstance(value, VideoReader):
            # convert the decord video reader to bytes
            return encode_decord_video(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the video bytes, and path is used to infer the video format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
f"A video sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
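A short sketch of the encoding rules above, assuming `Video` is exported at the package root as in this (experimental, decord-based) version of the library; the file path and the byte string are hypothetical:

```py
from datasets import Video  # experimental feature

video = Video()

# A plain string is stored as a path reference, without duplicating the data.
print(video.encode_example("clip.mp4"))
# {'path': 'clip.mp4', 'bytes': None}

# Raw bytes are stored directly; with no path, the format is inferred from the bytes at decode time.
print(video.encode_example(b"\x00\x00\x00\x18ftypmp42"))
# {'path': None, 'bytes': b'\x00\x00\x00\x18ftypmp42'}
```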
    def decode_example(self, value: dict, token_per_repo_id=None) -> "VideoReader":
        """Decode example video file into video data.

        Args:
            value (`str` or `dict`):
                A string with the absolute video file path, or a dictionary with keys:

                - `path`: String with absolute or relative video file path.
                - `bytes`: The bytes of the video file.
            token_per_repo_id (`dict`, *optional*):
                To access and decode video files from private repositories on the Hub, you can pass
                a dictionary repo_id (`str`) -> token (`bool` or `str`).

        Returns:
            `decord.VideoReader`
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Video(decode=True) instead.")

        if config.DECORD_AVAILABLE:
            from decord import VideoReader
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
        else:
            raise ImportError("To support decoding videos, please install 'decord'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"A video should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    video = VideoReader(path)
                else:
                    source_url = path.split("::")[-1]
                    pattern = (
                        config.HUB_DATASETS_URL
                        if source_url.startswith(config.HF_ENDPOINT)
                        else config.HUB_DATASETS_HFFS_URL
                    )
                    try:
                        repo_id = string_to_dict(source_url, pattern)["repo_id"]
                        token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        token = None
                    download_config = DownloadConfig(token=token)
                    with xopen(path, "rb", download_config=download_config) as f:
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
                        bytes_ = BytesIO(f.read())
                        video = VideoReader(bytes_)
        else:
            video = VideoReader(BytesIO(bytes_))
        return video
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
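For context, a minimal sketch of how decoding is typically triggered from the public API. The file name is a placeholder, and `decord` must be installed for the decode path shown above to run:

```py
from datasets import Dataset, Video

# "clip.mp4" stands in for any local video file.
ds = Dataset.from_dict({"video": ["clip.mp4"]}).cast_column("video", Video())
reader = ds[0]["video"]          # decord.VideoReader
print(len(reader))               # number of frames
print(reader[0].shape)           # (height, width, 3) for the first frame

# With decode=False the raw reference is returned instead of a VideoReader.
ds = ds.cast_column("video", Video(decode=False))
print(ds[0]["video"])            # {'bytes': None, 'path': 'clip.mp4'}
```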
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Video arrow storage type.
        The Arrow types that can be converted to the Video pyarrow storage type are:
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
        - `pa.string()` - it must contain the "path" data
        - `pa.binary()` - it must contain the video bytes
        - `pa.struct({"bytes": pa.binary()})`
        - `pa.struct({"path": pa.string()})`
        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
        - `pa.list(*)` - it must contain the video array data

        Args:
            storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
                PyArrow array to cast.
162
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/features/video.py
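Mirroring the Audio sketch earlier, a hedged illustration of the string and binary cases listed in this docstring; it assumes `Video` is importable from the package root and that calling `cast_storage` directly is acceptable for illustration (the list-of-frames case is omitted since it requires `decord` for encoding):

```py
import pyarrow as pa
from datasets import Video  # experimental feature

video = Video()

# Path-only input: the "bytes" field is filled with nulls.
paths = pa.array(["clip.mp4", None], type=pa.string())
print(video.cast_storage(paths).type)  # struct<bytes: binary, path: string>

# Bytes-only input: the "path" field is filled with nulls.
blobs = pa.array([b"\x00\x00\x00\x18ftypmp42"], type=pa.binary())
print(video.cast_storage(blobs).type)  # struct<bytes: binary, path: string>
```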