class SearchResults(NamedTuple):
scores: List[float]
indices: List[int]
class BatchedSearchResults(NamedTuple):
total_scores: List[List[float]]
total_indices: List[List[int]]
class NearestExamplesResults(NamedTuple):
scores: List[float]
examples: dict
class BatchedNearestExamplesResults(NamedTuple):
total_scores: List[List[float]]
total_examples: List[dict]
class BaseIndex:
"""Base class for indexing"""
def search(self, query, k: int = 10, **kwargs) -> SearchResults:
"""
To implement.
This method has to return the scores and the indices of the retrieved examples given a certain query.
"""
raise NotImplementedError
def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the query.
Args:
queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
k (`int`): The number of examples to retrieve per query. | 63 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Ouput:
total_scores (`List[List[float]`): The retrieval scores of the retrieved examples per query.
total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
total_scores, total_indices = [], []
for query in queries:
scores, indices = self.search(query, k)
total_scores.append(scores)
total_indices.append(indices)
return BatchedSearchResults(total_scores, total_indices)
def save(self, file: Union[str, PurePath]):
"""Serialize the index on disk"""
raise NotImplementedError
@classmethod
def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
"""Deserialize the index from disk"""
raise NotImplementedError
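# Editor's note: a minimal sketch (not from the library) of the `BaseIndex`
# contract, shown as a commented example. `DummyIndex` and its inner-product
# scoring are illustrative assumptions.
#
#     import numpy as np
#
#     class DummyIndex(BaseIndex):
#         def __init__(self, vectors: np.ndarray):
#             self.vectors = vectors  # shape (n_docs, dim)
#
#         def search(self, query, k: int = 10, **kwargs) -> SearchResults:
#             scores = self.vectors @ np.asarray(query)  # inner-product scores
#             top_k = np.argsort(-scores)[:k]
#             return SearchResults(scores[top_k].tolist(), top_k.tolist())
#
# The inherited `search_batch` then loops over queries and returns a
# `BatchedSearchResults`, so only `search` has to be implemented.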
class ElasticSearchIndex(BaseIndex):
"""
Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
An Elasticsearch server needs to be accessible, and a Python client can be declared with, for example:
```
es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
```
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
if not _has_elasticsearch:
raise ImportError(
"You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
)
if es_client is not None and (host is not None or port is not None):
raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
host = host or "localhost"
port = port or 9200
import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
from elasticsearch import Elasticsearch # noqa: F811
self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
self.es_index_name = (
es_index_name
if es_index_name is not None
else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
)
self.es_index_config = (
es_index_config
if es_index_config is not None
else {
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
}
)
def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
"""
Add documents to the index.
If the documents are inside a certain column, you can specify it using the `column` argument.
"""
index_name = self.es_index_name
index_config = self.es_index_config
self.es_client.indices.create(index=index_name, body=index_config)
number_of_docs = len(documents)
progress = hf_tqdm(unit="docs", total=number_of_docs)
successes = 0
def passage_generator():
if column is not None:
for i, example in enumerate(documents):
yield {"text": example[column], "_id": i}
else:
for i, example in enumerate(documents):
yield {"text": example, "_id": i}
# send the documents to the ES index with the streaming bulk helper
import elasticsearch as es
for ok, action in es.helpers.streaming_bulk(
client=self.es_client,
index=index_name,
actions=passage_generator(),
):
progress.update(1)
successes += ok
if successes != len(documents):
logger.warning(
f"Some documents failed to be added to ElasticSearch. Failures: {len(documents) - successes}/{len(documents)}"
)
logger.info(f"Indexed {successes:d} documents")
def search(self, query: str, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`str`): The query as a string.
k (`int`): The number of examples to retrieve.
Output:
scores (`List[float]`): The retrieval scores of the retrieved examples.
indices (`List[int]`): The indices of the retrieved examples.
"""
response = self.es_client.search(
index=self.es_index_name,
body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
**kwargs,
)
hits = response["hits"]["hits"]
return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
import concurrent.futures
total_scores, total_indices = [None] * len(queries), [None] * len(queries)
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
for future in concurrent.futures.as_completed(future_to_index):
index = future_to_index[future]
results: SearchResults = future.result()
total_scores[index] = results.scores
total_indices[index] = results.indices
return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
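# Editor's note: a hedged usage sketch for `ElasticSearchIndex`, assuming an
# Elasticsearch 7.x server is reachable on localhost:9200; the documents and
# queries are illustrative.
#
#     index = ElasticSearchIndex(host="localhost", port=9200)
#     index.add_documents(["the cat sat on the mat", "dogs chase cats"])
#     scores, indices = index.search("cat", k=2)                  # BM25 ranking
#     total_scores, total_indices = index.search_batch(["cat", "dog"], k=2)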
class FaissIndex(BaseIndex):
"""
Dense index using Faiss. It is used to index vectors.
Faiss is a library for efficient similarity search and clustering of dense vectors.
It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
You can find more information about Faiss here:
- For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
- For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
""" | 65 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
def __init__(
self,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
):
"""
Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
"""
if string_factory is not None and custom_index is not None:
raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
if device is not None and custom_index is not None:
raise ValueError(
"Cannot pass both 'custom_index' and 'device'. "
"Pass 'custom_index' already transferred to the target device instead."
)
self.device = device
self.string_factory = string_factory
self.metric_type = metric_type
self.faiss_index = custom_index
if not _has_faiss:
raise ImportError(
"You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
"A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
"Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
)
def add_vectors(
self,
vectors: Union[np.array, "Dataset"],
column: Optional[str] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: Optional[bool] = None,
):
"""
Add vectors to the index.
If the arrays are inside a certain column, you can specify it using the `column` argument.
"""
import faiss # noqa: F811
if column and not isinstance(vectors.features[column], Sequence):
raise ValueError(
f"Wrong feature type for column '{column}'. Expected 1d array, got {vectors.features[column]}"
)
# Create index
if self.faiss_index is None:
size = len(vectors[0]) if column is None else len(vectors[0][column])
if self.string_factory is not None:
if self.metric_type is None:
index = faiss.index_factory(size, self.string_factory)
else:
index = faiss.index_factory(size, self.string_factory, self.metric_type)
else:
if self.metric_type is None:
index = faiss.IndexFlat(size)
else:
index = faiss.IndexFlat(size, self.metric_type)
self.faiss_index = self._faiss_index_to_device(index, self.device)
logger.info(f"Created faiss index of type {type(self.faiss_index)}") | 65 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
# Set verbosity level
if faiss_verbose is not None:
self.faiss_index.verbose = faiss_verbose
if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
self.faiss_index.index.verbose = faiss_verbose
if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
self.faiss_index.quantizer.verbose = faiss_verbose
if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
self.faiss_index.clustering_index.verbose = faiss_verbose
# Train
if train_size is not None:
train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
logger.info(f"Training the index with the first {len(train_vecs)} vectors")
self.faiss_index.train(train_vecs)
else:
logger.info("Ignored the training step of the faiss index as `train_size` is None.") | 65 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
# Add vectors
logger.info(f"Adding {len(vectors)} vectors to the faiss index")
for i in hf_tqdm(range(0, len(vectors), batch_size)):
vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
self.faiss_index.add(vecs)
@staticmethod
def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
"""
Sends a faiss index to a device.
A device can be a positive integer (GPU id), a negative integer (all GPUs),
a list of positive integers (the GPUs to use), or `None` for CPU.
"""
# If device is not specified, then it runs on CPU.
if device is None:
return index
import faiss # noqa: F811
# If the device id is given as an integer
if isinstance(device, int):
# Positive integers are directly mapped to GPU ids
if device > -1:
faiss_res = faiss.StandardGpuResources()
index = faiss.index_cpu_to_gpu(faiss_res, device, index)
# And negative integers mean using all GPUs
else:
index = faiss.index_cpu_to_all_gpus(index)
# Device ids given as a list mean mapping to those devices specified.
elif isinstance(device, (list, tuple)):
index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
else:
raise TypeError(
f"The argument type: {type(device)} is not expected. "
+ "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
)
return index
def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
"""Find the nearest examples indices to the query.
Args:
query (`np.array`): The query as a numpy array.
k (`int`): The number of examples to retrieve.
Output:
scores (`List[float]`): The retrieval scores of the retrieved examples.
indices (`List[int]`): The indices of the retrieved examples.
"""
if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
queries = query.reshape(1, -1)
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return SearchResults(scores[0], indices[0].astype(int))
def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
"""Find the nearest examples indices to the queries.
Args:
queries (`np.array`): The queries as a numpy array.
k (`int`): The number of examples to retrieve.
Output:
total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
"""
if len(queries.shape) != 2:
raise ValueError("Shape of query must be 2D")
if not queries.flags.c_contiguous:
queries = np.asarray(queries, order="C")
scores, indices = self.faiss_index.search(queries, k, **kwargs)
return BatchedSearchResults(scores, indices.astype(int))
def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Serialize the FaissIndex on disk"""
import faiss # noqa: F811
if self.device is not None and isinstance(self.device, (int, list, tuple)):
index = faiss.index_gpu_to_cpu(self.faiss_index)
else:
index = self.faiss_index
with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
@classmethod
def load(
cls,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
) -> "FaissIndex":
"""Deserialize the FaissIndex from disk"""
import faiss # noqa: F811
# An instance of FaissIndex is essentially just a wrapper around a faiss index.
faiss_index = cls(device=device)
with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
return faiss_index
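# Editor's note: a minimal end-to-end sketch for `FaissIndex`, assuming faiss
# and numpy are installed; the vectors and file path are illustrative.
#
#     import numpy as np
#     vectors = np.random.rand(100, 32).astype(np.float32)
#     index = FaissIndex()                         # defaults to faiss.IndexFlat
#     index.add_vectors(vectors)
#     scores, indices = index.search(vectors[0], k=5)     # single 1D query
#     batched = index.search_batch(vectors[:8], k=5)      # 2D (n_queries, dim)
#     index.save("my_index.faiss")
#     index = FaissIndex.load("my_index.faiss")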
class IndexableMixin:
"""Add indexing features to `datasets.Dataset`"""
def __init__(self):
self._indexes: Dict[str, BaseIndex] = {}
def __len__(self):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def is_index_initialized(self, index_name: str) -> bool:
return index_name in self._indexes
def _check_index_is_initialized(self, index_name: str):
if not self.is_index_initialized(index_name):
raise MissingIndex(
f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
)
def list_indexes(self) -> List[str]:
"""List the `colindex_nameumns`/identifiers of all the attached indexes."""
return list(self._indexes)
def get_index(self, index_name: str) -> BaseIndex:
"""List the `index_name`/identifiers of all the attached indexes. | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Args:
index_name (`str`): Index name.
Returns:
[`BaseIndex`]
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name]
def add_faiss_index(
self,
column: str,
index_name: Optional[str] = None,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of the specified column.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
column (`str`): The column of the vectors to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
index_name = index_name if index_name is not None else column
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
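# Editor's note: a hedged sketch of `add_faiss_index` on a `datasets.Dataset`.
# The "embeddings" column and the `embed` function are assumptions standing in
# for any model that produces 1d float arrays.
#
#     ds = ds.map(lambda ex: {"embeddings": embed(ex["text"])})
#     ds.add_faiss_index(column="embeddings")
#     scores, examples = ds.get_nearest_examples("embeddings", embed("my query"), k=10)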
def add_faiss_index_from_external_arrays(
self,
external_arrays: np.array,
index_name: str,
device: Optional[Union[int, List[int]]] = None,
string_factory: Optional[str] = None,
metric_type: Optional[int] = None,
custom_index: Optional["faiss.Index"] = None,
batch_size: int = 1000,
train_size: Optional[int] = None,
faiss_verbose: bool = False,
):
"""Add a dense index using Faiss for fast retrieval.
The index is created using the vectors of `external_arrays`.
You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
You can find more information about Faiss here:
- For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
<Added version="2.4.0"/>
train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
"""
faiss_index = FaissIndex(
device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
)
faiss_index.add_vectors(
external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
)
self._indexes[index_name] = faiss_index
def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
"""Save a FaissIndex on disk.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/>
"""
index = self.get_index(index_name)
if not isinstance(index, FaissIndex):
raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
index.save(file, storage_options=storage_options)
logger.info(f"Saved FaissIndex {index_name} at {file}") | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
def load_faiss_index(
self,
index_name: str,
file: Union[str, PurePath],
device: Optional[Union[int, List[int]]] = None,
storage_options: Optional[Dict] = None,
):
"""Load a FaissIndex from disk.
If you want to do additional configuration, you can access the faiss index object with
`.get_index(index_name).faiss_index` to make it fit your needs.
Args:
index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
call `.get_nearest` or `.search`.
file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the file-system backend, if any.
<Added version="2.11.0"/> | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
"""
index = FaissIndex.load(file, device=device, storage_options=storage_options)
if index.faiss_index.ntotal != len(self):
raise ValueError(
f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
)
self._indexes[index_name] = index
logger.info(f"Loaded FaissIndex {index_name} from {file}")
def add_elasticsearch_index(
self,
column: str,
index_name: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_name: Optional[str] = None,
es_index_config: Optional[dict] = None,
):
"""Add a text index using ElasticSearch for fast retrieval. | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Args:
column (`str`): The column of the documents to add to the index.
index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
By default it corresponds to `column`.
host (Optional `str`, defaults to localhost):
host of where ElasticSearch is running
port (Optional `int`, defaults to 9200):
port of where ElasticSearch is running
es_client (Optional `elasticsearch.Elasticsearch`):
The elasticsearch client used to create the index if host and port are None.
es_index_name (Optional `str`): The elasticsearch index name used to create the index.
es_index_config (Optional `dict`):
The configuration of the elasticsearch index.
Default config is:
Config::
{
"settings": {
"number_of_shards": 1,
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
"""
index_name = index_name if index_name is not None else column
es_index = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
es_index.add_documents(self, column=column)
self._indexes[index_name] = es_index
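# Editor's note: a hedged sketch of `add_elasticsearch_index`; the column name
# and query are illustrative, and an ES server must be running.
#
#     ds.add_elasticsearch_index(column="text", host="localhost", port=9200)
#     scores, examples = ds.get_nearest_examples("text", "machine learning", k=5)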
def load_elasticsearch_index(
self,
index_name: str,
es_index_name: str,
host: Optional[str] = None,
port: Optional[int] = None,
es_client: Optional["Elasticsearch"] = None,
es_index_config: Optional[dict] = None,
):
"""Load an existing text index using ElasticSearch for fast retrieval. | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Args:
index_name (`str`):
The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
es_index_name (`str`):
The name of elasticsearch index to load.
host (`str`, *optional*, defaults to `localhost`):
Host of where ElasticSearch is running.
port (`int`, *optional*, defaults to `9200`):
Port of where ElasticSearch is running.
es_client (`elasticsearch.Elasticsearch`, *optional*):
The elasticsearch client used to create the index if host and port are `None`.
es_index_config (`dict`, *optional*):
The configuration of the elasticsearch index.
Default config is:
```
{
"settings": {
"number_of_shards": 1, | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
"analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
},
"mappings": {
"properties": {
"text": {
"type": "text",
"analyzer": "standard",
"similarity": "BM25"
},
}
},
}
```
"""
self._indexes[index_name] = ElasticSearchIndex(
host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
)
def drop_index(self, index_name: str):
"""Drop the index with the specified column.
Args:
index_name (`str`):
The `index_name`/identifier of the index.
"""
del self._indexes[index_name]
def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
"""Find the nearest examples indices in the dataset to the query.
Args:
index_name (`str`):
The name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, indices)`:
A tuple of `(scores, indices)` where:
- **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
- **indices** (`List[int]`): the indices of the retrieved examples
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search(query, k, **kwargs)
def search_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedSearchResults:
"""Find the nearest examples indices in the dataset to the query. | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_indices)`:
A tuple of `(total_scores, total_indices)` where:
- **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
return self._indexes[index_name].search_batch(queries, k, **kwargs)
def get_nearest_examples(
self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
) -> NearestExamplesResults:
"""Find the nearest examples in the dataset to the query.
Args:
index_name (`str`):
The index_name/identifier of the index.
query (`Union[str, np.ndarray]`):
The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve.
Returns:
`(scores, examples)`:
A tuple of `(scores, examples)` where:
- **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
- **examples** (`dict`): the retrieved examples
"""
self._check_index_is_initialized(index_name)
scores, indices = self.search(index_name, query, k, **kwargs)
top_indices = [i for i in indices if i >= 0]
return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
def get_nearest_examples_batch(
self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
) -> BatchedNearestExamplesResults:
"""Find the nearest examples in the dataset to the query. | 66 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/search.py |
Args:
index_name (`str`):
The `index_name`/identifier of the index.
queries (`Union[List[str], np.ndarray]`):
The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
k (`int`):
The number of examples to retrieve per query.
Returns:
`(total_scores, total_examples)`:
A tuple of `(total_scores, total_examples)` where:
- **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
- **total_examples** (`List[dict]`): the retrieved examples per query
"""
self._check_index_is_initialized(index_name)
total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
total_scores = [
scores_i[: len([i for i in indices_i if i >= 0])]
for scores_i, indices_i in zip(total_scores, total_indices)
]
total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
return BatchedNearestExamplesResults(total_scores, total_samples)
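# Editor's note: `search`/`search_batch` return positions into the dataset,
# while `get_nearest_examples*` additionally resolves them to rows. A sketch
# with an assumed "embeddings" index:
#
#     scores, indices = ds.search("embeddings", query_vector, k=3)
#     scores, examples = ds.get_nearest_examples("embeddings", query_vector, k=3)
#     # `examples` is a dict of columns holding the k retrieved rows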
class DatasetsError(Exception):
"""Base class for exceptions in this library.""" | 67 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct.""" | 68 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library.""" | 69 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found.""" | 70 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
""" | 71 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class DatasetBuildError(DatasetsError):
pass
class ManualDownloadError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: Dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: List[str] = []
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
continue
while (
isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator)) and gen_kwarg.last_item is not None
):
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)" | 76 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
return cls("An error occurred while generating the dataset" + explanation_message + help_message) | 76 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class ChecksumVerificationError(DatasetsError):
"""Error raised during checksums verifications of downloaded files.""" | 77 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class UnexpectedDownloadedFileError(ChecksumVerificationError):
"""Some downloaded files were not expected.""" | 78 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
"""Some files were supposed to be downloaded but were not.""" | 79 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class NonMatchingChecksumError(ChecksumVerificationError):
"""The downloaded file checksum don't match the expected checksum.""" | 80 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class SplitsVerificationError(DatasetsError):
"""Error raised during splits verifications.""" | 81 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class UnexpectedSplitsError(SplitsVerificationError):
"""The expected splits of the downloaded file is missing.""" | 82 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class ExpectedMoreSplitsError(SplitsVerificationError):
"""Some recorded splits are missing.""" | 83 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
class NonMatchingSplitsSizesError(SplitsVerificationError):
"""The splits sizes don't match the expected splits sizes.""" | 84 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/exceptions.py |
@dataclasses.dataclass
class SplitInfo:
name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
shard_lengths: Optional[List[int]] = None
# Deprecated
# For backward compatibility, this field needs to always be included in files like
# dataset_infos.json and dataset_info.json files
# To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
dataset_name: Optional[str] = dataclasses.field(
default=None, metadata={"include_in_asdict_even_if_is_default": True}
)
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
# `self.dataset_name` is assigned in `SplitDict.add()`.
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=[self],
instruction=str(self.name),
)
return instructions.file_instructions
@dataclasses.dataclass
class SubSplitInfo:
"""Wrapper around a sub split info.
This class exposes info on the subsplit:
```
ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
info.splits['train[75%:]'].num_examples
```
"""
instructions: FileInstructions
@property
def num_examples(self):
"""Returns the number of example in the subsplit."""
return self.instructions.num_examples
@property
def file_instructions(self):
"""Returns the list of dict(filename, take, skip)."""
return self.instructions.file_instructions
class SplitBase(metaclass=abc.ABCMeta):
# pylint: disable=line-too-long
"""Abstract base class for Split compositionality.
See the
[guide on splits](../loading#slice-splits)
for more information.
There are three parts to the composition:
1) The splits are composed (defined, merged, split,...) together before
calling the `.as_dataset()` function. This is done with the `__add__`,
`__getitem__`, which return a tree of `SplitBase` (whose leaves
are the `NamedSplit` objects)
```
split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
```
2) The `SplitBase` is forwarded to the `.as_dataset()` function
to be resolved into actual read instructions. This is done by the
`.get_read_instruction()` method which takes the real dataset splits
(name, number of shards,...) and parses the tree to return a
`SplitReadInstruction()` object
```
read_instruction = split.get_read_instruction(self.info.splits)
```
3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
to define which files to read and how to skip examples within file.
"""
# pylint: enable=line-too-long
@abc.abstractmethod
def get_read_instruction(self, split_dict):
"""Parse the descriptor tree and compile all read instructions together.
Args:
split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
Returns:
split_read_instruction: `SplitReadInstruction`
"""
raise NotImplementedError("Abstract method")
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, (NamedSplit, str)):
return False
raise NotImplementedError("Equality is not implemented between merged/sub splits.") | 87 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
def __ne__(self, other):
"""InEquality: datasets.Split.TRAIN != 'test'."""
return not self.__eq__(other)
def __add__(self, other):
"""Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
return _SplitMerged(self, other)
def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
"""Divides this split into subsplits.
There are 3 ways to define subsplits, which correspond to the 3
arguments `k` (get `k` even subsplits), `percent` (get a slice of the
dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
specified by `weighted`).
Example::
```
# 50% train, 50% test
train, test = split.subsplit(k=2)
# 50% train, 25% test, 25% validation
train, test, validation = split.subsplit(weighted=[2, 1, 1])
# Extract last 20%
subsplit = split.subsplit(datasets.percent[-20:])
```
Warning: `k` and `weighted` will be converted into percents, which means that
values below one percent will be rounded up or down. The final split may be
bigger to deal with remainders. For instance:
```
train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
```
Args:
arg: If no kwargs are given, `arg` will be interpreted as one of
`k`, `percent`, or `weighted` depending on the type.
For example:
```
split.subsplit(10) # Equivalent to split.subsplit(k=10)
split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
```
k: `int` If set, subdivide the split into `k` equal parts.
percent: `datasets.percent slice`, return a single subsplit corresponding to
a slice of the original split. For example:
`split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
weighted: `list[int]`, return a list of subsplits whose proportions match
the normalized sum of the list. For example:
`split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
Returns:
A subsplit or list of subsplits extracted from this split object.
"""
# Note that the percent kwargs redefine the outer name datasets.percent. This
# is done for consistency (.subsplit(percent=datasets.percent[:40]))
if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
raise ValueError("Only one argument of subsplit should be set.")
# Auto deduce k
if isinstance(arg, int):
k = arg
elif isinstance(arg, slice):
percent = arg
elif isinstance(arg, list):
weighted = arg
if not (k or percent or weighted):
raise ValueError(
f"Invalid split argument {arg}. Only list, slice and int supported. "
"One of k, weighted or percent should be set to a non empty value."
) | 87 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
def assert_slices_coverage(slices):
# Ensure that the expanded slices cover all percents.
assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
if k:
if not 0 < k <= 100:
raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
shift = 100 // k
slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
elif percent:
return _SubSplit(self, percent)
elif weighted:
# Normalize the weighted sum
total = sum(weighted)
weighted = [100 * x // total for x in weighted]
# Create the slice for each of the elements
start = 0
stop = 0
slices = []
for v in weighted:
stop += v
slices.append(slice(start, stop))
start = stop
# Round up last element to ensure all elements are taken
slices[-1] = slice(slices[-1].start, 100)
# Internal check to ensure full coverage
assert_slices_coverage(slices)
return tuple(_SubSplit(self, s) for s in slices)
else:
# Should not be possible
raise ValueError("Could not determine the split") | 87 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
class PercentSliceMeta(type):
def __getitem__(cls, slice_value):
if not isinstance(slice_value, slice):
raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
return slice_value
class PercentSlice(metaclass=PercentSliceMeta):
# pylint: disable=line-too-long
"""Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
See the
[guide on splits](../loading#slice-splits)
for more information.
"""
# pylint: enable=line-too-long
pass
class _SplitMerged(SplitBase):
"""Represent two split descriptors merged together."""
def __init__(self, split1, split2):
self._split1 = split1
self._split2 = split2
def get_read_instruction(self, split_dict):
read_instruction1 = self._split1.get_read_instruction(split_dict)
read_instruction2 = self._split2.get_read_instruction(split_dict)
return read_instruction1 + read_instruction2
def __repr__(self):
return f"({repr(self._split1)} + {repr(self._split2)})" | 90 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
class _SubSplit(SplitBase):
"""Represent a sub split of a split descriptor."""
def __init__(self, split, slice_value):
self._split = split
self._slice_value = slice_value
def get_read_instruction(self, split_dict):
return self._split.get_read_instruction(split_dict)[self._slice_value]
def __repr__(self):
slice_str = "{start}:{stop}"
if self._slice_value.step is not None:
slice_str += ":{step}"
slice_str = slice_str.format(
start="" if self._slice_value.start is None else self._slice_value.start,
stop="" if self._slice_value.stop is None else self._slice_value.stop,
step=self._slice_value.step,
)
return f"{repr(self._split)}(datasets.percent[{slice_str}])" | 91 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
class NamedSplit(SplitBase):
"""Descriptor corresponding to a named split (train, test, ...).
Example:
Each descriptor can be composed with others using addition or slicing:
```py
split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
```
The resulting split will correspond to 25% of the train split merged with
100% of the test split.
A split cannot be added twice, so the following will fail:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TRAIN.subsplit(datasets.percent[75:])
) # Error
split = datasets.Split.TEST + datasets.Split.ALL # Error
```
Slices can be applied only once. So the following are valid:
```py
split = (
datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
datasets.Split.TEST.subsplit(datasets.percent[:50])
)
split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
```
But this is not valid:
```py
train = datasets.Split.TRAIN
test = datasets.Split.TEST
split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
```
""" | 92 | /Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/splits.py |
def __init__(self, name):
self._name = name
split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
for split_name in split_names_from_instruction:
if not re.match(_split_re, split_name):
raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
def __str__(self):
return self._name
def __repr__(self):
return f"NamedSplit({self._name!r})"
def __eq__(self, other):
"""Equality: datasets.Split.TRAIN == 'train'."""
if isinstance(other, NamedSplit):
return self._name == other._name # pylint: disable=protected-access
elif isinstance(other, SplitBase):
return False
elif isinstance(other, str): # Other should be string
return self._name == other
else:
return False
def __lt__(self, other):
return self._name < other._name # pylint: disable=protected-access
def __hash__(self):
return hash(self._name)
def get_read_instruction(self, split_dict):
return SplitReadInstruction(split_dict[self._name])
class NamedSplitAll(NamedSplit):
"""Split corresponding to the union of all defined dataset splits."""
def __init__(self):
super().__init__("all")
def __repr__(self):
return "NamedSplitAll()"
def get_read_instruction(self, split_dict):
# Merge all dataset split together
read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
return sum(read_instructions, SplitReadInstruction())
class Split:
# pylint: disable=line-too-long
"""`Enum` for dataset splits.
Datasets are typically split into different subsets to be used at various
stages of training and evaluation.
- `TRAIN`: the training data.
- `VALIDATION`: the validation data. If present, this is typically used as
evaluation data while iterating on a model (e.g. changing hyperparameters,
model architecture, etc.).
- `TEST`: the testing data. This is the data to report metrics on. Typically
you do not want to use this during model iteration as you may overfit to it.
- `ALL`: the union of all defined dataset splits.
All splits, including compositions, inherit from `datasets.SplitBase`.
See the [guide](../load_hub#splits) on splits for more information.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
... gen_kwargs={"split_key": "train", "files": dl_manager.download_and extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.VALIDATION,
... gen_kwargs={"split_key": "validation", "files": dl_manager.download_and extract(url)},
... ),
... datasets.SplitGenerator(
... name=datasets.Split.TEST,
... gen_kwargs={"split_key": "test", "files": dl_manager.download_and extract(url)},
... )
```
"""
# pylint: enable=line-too-long
TRAIN = NamedSplit("train")
TEST = NamedSplit("test")
VALIDATION = NamedSplit("validation")
ALL = NamedSplitAll()
def __new__(cls, name):
"""Create a custom split with datasets.Split('custom_name')."""
return NamedSplitAll() if name == "all" else NamedSplit(name)
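# Editor's note: a small sketch of `Split` usage; `Split.__new__` turns a name
# into a `NamedSplit`, so custom splits compose like the built-in ones.
#
#     custom = Split("paraphrase")          # NamedSplit('paraphrase')
#     combined = Split.TRAIN + Split.TEST   # merged split descriptor
#     assert Split.TRAIN == "train"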
class SplitReadInstruction:
"""Object containing the reading instruction for the dataset.
Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
but the resolution happens instantaneously, instead of keeping track of the
tree, so that all instructions are compiled and flattened in a single
SplitReadInstruction object containing the list of files and slices to use.
Once resolved, the instructions can be accessed with:
```
read_instructions.get_list_sliced_split_info() # List of splits to use
```
"""
def __init__(self, split_info=None):
self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with itself.")
if split_info:
self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
def add(self, sliced_split):
"""Add a SlicedSplitInfo the read instructions."""
# TODO(epot): Check that the number of examples per shard % 100 == 0
# Otherwise the slices value may be unbalanced and not exactly reflect the
# requested slice.
self._splits[sliced_split.split_info.name] = sliced_split
def __add__(self, other):
"""Merging split together."""
# Will raise an error if a split has already been added (NonMutableDict)
# TODO(epot): If a split is already added but there is no overlap between
# the slices, should merge the slices (ex: [:10] + [80:])
split_instruction = SplitReadInstruction()
split_instruction._splits.update(self._splits) # pylint: disable=protected-access
split_instruction._splits.update(other._splits) # pylint: disable=protected-access
return split_instruction
def __getitem__(self, slice_value):
"""Sub-splits."""
# Will raise an error if a split has already been sliced
split_instruction = SplitReadInstruction()
for v in self._splits.values():
if v.slice_value is not None:
raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
v = v._asdict()
v["slice_value"] = slice_value
split_instruction.add(SlicedSplitInfo(**v))
return split_instruction
def get_list_sliced_split_info(self):
return list(self._splits.values())
class SplitDict(dict):
"""Split info object."""
def __init__(self, *args, dataset_name=None, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_name = dataset_name
def __getitem__(self, key: Union[SplitBase, str]):
# 1st case: The key exists: `info.splits['train']`
if str(key) in self:
return super().__getitem__(str(key))
# 2nd case: Uses instructions: `info.splits['train[50%]']`
else:
instructions = make_file_instructions(
name=self.dataset_name,
split_infos=self.values(),
instruction=key,
)
return SubSplitInfo(instructions)
def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
if key != value.name:
raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
super().__setitem__(key, value)
def add(self, split_info: SplitInfo):
"""Add the split info."""
if split_info.name in self:
raise ValueError(f"Split {split_info.name} already present")
split_info.dataset_name = self.dataset_name
super().__setitem__(split_info.name, split_info)
@property
def total_num_examples(self):
"""Return the total number of examples."""
return sum(s.num_examples for s in self.values())
@classmethod
def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
"""Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
if isinstance(split_infos, dict):
split_infos = list(split_infos.values())
if dataset_name is None:
dataset_name = split_infos[0].get("dataset_name") if split_infos else None
split_dict = cls(dataset_name=dataset_name)
for split_info in split_infos:
if isinstance(split_info, dict):
split_info = SplitInfo(**split_info)
split_dict.add(split_info)
return split_dict
def to_split_dict(self):
"""Returns a list of SplitInfo protos that we have."""
out = []
for split_name, split_info in self.items():
split_info = copy.deepcopy(split_info)
split_info.name = split_name
out.append(split_info)
return out
def copy(self):
return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
def _to_yaml_list(self) -> list:
out = [asdict(s) for s in self.to_split_dict()]
# we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
for split_info_dict in out:
split_info_dict.pop("shard_lengths", None)
# we don't need the dataset_name attribute that is deprecated
for split_info_dict in out:
split_info_dict.pop("dataset_name", None)
return out
@classmethod
def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
return cls.from_split_dict(yaml_data)
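# Editor's note: a hedged sketch of building a `SplitDict` from plain dicts,
# as happens when reading a dataset_infos.json file; values are illustrative.
#
#     splits = SplitDict.from_split_dict(
#         [{"name": "train", "num_bytes": 1024, "num_examples": 100}],
#         dataset_name="my_dataset",
#     )
#     assert splits["train"].num_examples == 100
#     assert splits.total_num_examples == 100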
@dataclasses.dataclass
class SplitGenerator:
"""Defines the split information for the generator.
This should be used as returned value of
`GeneratorBasedBuilder._split_generators`.
See `GeneratorBasedBuilder._split_generators` for more info and example
of usage.
Args:
name (`str`):
Name of the `Split` for which the generator will
create the examples.
**gen_kwargs (additional keyword arguments):
Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
of the builder.
Example:
```py
>>> datasets.SplitGenerator(
... name=datasets.Split.TRAIN,
... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
... )
```
"""
name: str
gen_kwargs: Dict = dataclasses.field(default_factory=dict)
split_info: SplitInfo = dataclasses.field(init=False)