Dataset Viewer
Columns:

- `hash`: string, 40 characters
- `authorName`: string, 42 distinct values
- `authorEmail`: string, 41 distinct values
- `date`: timestamp[ms], from 2021-07-26 09:52:55 to 2025-07-18 10:19:56
- `subject`: string, 11 to 116 characters
- `diff`: string, 0 to 987k characters

Each row below is shown as `hash | authorName | date | subject`, followed by the row's diff (authorEmail values are not included in the preview).
cf79aee59cb74b067987238cfbf0be493428b802 | Remy | 2025-07-18T10:19:56 | fix(infra): resources optimization (#3225)

diff --git a/chart/env/prod.yaml b/chart/env/prod.yaml
index 18862dd7..b7ca3a2e 100644
--- a/chart/env/prod.yaml
+++ b/chart/env/prod.yaml
@@ -347,2 +347,2 @@ api:
- cpu: 4
- memory: "14Gi"
+ cpu: "900m"
+ memory: "4Gi"
@@ -350,2 +350,2 @@ api:
- cpu: 4
- memory: "14Gi"
+ cpu: "1500m"
+ memory: "6Gi"
@@ -516,2 +516,2 @@ workers:
- cpu: 2
- memory: "14Gi"
+ cpu: "1200m"
+ memory: "10Gi"

9efbe6f23800c645e1f4a486d5d32dd2c577237a | ccl-core | 2025-07-17T20:55:29 | Include Audio features in HuggingFace. (#3224)

diff --git a/libs/libcommon/src/libcommon/croissant_utils.py b/libs/libcommon/src/libcommon/croissant_utils.py
index cf37c0b0..0922bb65 100644
--- a/libs/libcommon/src/libcommon/croissant_utils.py
+++ b/libs/libcommon/src/libcommon/croissant_utils.py
@@ -8 +8 @@ from typing import Any, Optional, Union
-from datasets import ClassLabel, Image, LargeList, List, Value
+from datasets import Audio, ClassLabel, Image, LargeList, List, Value
@@ -141,0 +142,10 @@ def feature_to_croissant_field(
+ elif isinstance(feature, Audio):
+ source = get_source(distribution_name, column, add_transform, json_path)
+ if sample_rate := feature.get("sampling_rate"):
+ source["sampling_rate"] = sample_rate
+ return {
+ "@type": "cr:Field",
+ "@id": field_name,
+ "dataType": "sc:AudioObject",
+ "source": source,
+ }
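
For illustration, the new branch should emit a field like the one below for an `Audio(sampling_rate=16000)` column named `audio` in a record set `default`. This is a sketch: the exact layout of `source` comes from `get_source`, which is not shown in this diff, and the distribution and field IDs are hypothetical.

```python
# Hypothetical Croissant field for a 16 kHz audio column (the "source"
# structure is assumed; only "sampling_rate" is confirmed by the diff).
expected_field = {
    "@type": "cr:Field",
    "@id": "default/audio",
    "dataType": "sc:AudioObject",
    "source": {
        "fileSet": {"@id": "parquet-files-for-config-default"},
        "extract": {"column": "audio"},
        "sampling_rate": 16000,
    },
}
```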

03e368d7022cf9d07135d00fcc769a43e34c4f4f | ccl-core | 2025-07-17T18:15:15 | Escape subfields. (#3220)

diff --git a/libs/libcommon/src/libcommon/croissant_utils.py b/libs/libcommon/src/libcommon/croissant_utils.py
index 6dfcaacf..cf37c0b0 100644
--- a/libs/libcommon/src/libcommon/croissant_utils.py
+++ b/libs/libcommon/src/libcommon/croissant_utils.py
@@ -9,0 +10,23 @@ from datasets import ClassLabel, Image, LargeList, List, Value
+NAME_PATTERN_REGEX = "[^a-zA-Z0-9\\-_\\.]"
+JSONPATH_PATTERN_REGEX = re.compile(r"^[a-zA-Z0-9_]+$")
+
+
+def escape_ids(id_to_escape: str, ids: set[str]) -> str:
+ """Escapes IDs and names in Croissant.
+
+ Reasons:
+ - `/` are used in the syntax as delimiters. So we replace them.
+ - Two FileObject/FileSet/RecordSet/Fields cannot have the same ID. So we append a postfix in case it happens.
+
+ Args:
+ id_to_escape: The initial non-escaped ID.
+ ids: The set of already existing IDs.
+ Returns:
+ `str`: The escaped, unique ID or name.
+ """
+ escaped_id = re.sub(NAME_PATTERN_REGEX, "_", id_to_escape)
+ while escaped_id in ids:
+ escaped_id = f"{escaped_id}_0"
+ ids.add(escaped_id)
+ return escaped_id
+
@@ -69,6 +92,6 @@ def escape_jsonpath_key(feature_name: str) -> str:
- if "/" in feature_name or "'" in feature_name or "]" in feature_name or "[" in feature_name:
- escaped_name = re.sub(r"(?<!\\)'", r"\'", feature_name)
- escaped_name = re.sub(r"(?<!\\)\[", r"\[", escaped_name)
- escaped_name = re.sub(r"(?<!\\)\]", r"\]", escaped_name)
- return f"['{escaped_name}']"
- return feature_name
+ if JSONPATH_PATTERN_REGEX.match(feature_name):
+ return feature_name
+ escaped_name = re.sub(r"(?<!\\)'", r"\'", feature_name)
+ escaped_name = re.sub(r"(?<!\\)\[", r"\[", escaped_name)
+ escaped_name = re.sub(r"(?<!\\)\]", r"\]", escaped_name)
+ return f"['{escaped_name}']"
@@ -94,0 +118 @@ def feature_to_croissant_field(
+ existing_ids: set[str],
@@ -135 +159 @@ def feature_to_croissant_field(
- f"{field_name}/{subfeature_name}",
+ f"{field_name}/{escape_ids(subfeature_name, existing_ids)}",
@@ -137,0 +162 @@ def feature_to_croissant_field(
+ existing_ids=existing_ids,
@@ -158 +183,7 @@ def feature_to_croissant_field(
- distribution_name, field_name, column, sub_feature, add_transform=True, json_path=json_path
+ distribution_name,
+ field_name,
+ column,
+ sub_feature,
+ existing_ids=existing_ids,
+ add_transform=True,
+ json_path=json_path,
diff --git a/libs/libcommon/tests/test_croissant_utils.py b/libs/libcommon/tests/test_croissant_utils.py
index 082d7748..1a2d1b45 100644
--- a/libs/libcommon/tests/test_croissant_utils.py
+++ b/libs/libcommon/tests/test_croissant_utils.py
@@ -11,0 +12 @@ from libcommon.croissant_utils import (
+ escape_ids,
@@ -36,0 +38,18 @@ def test_truncate_features_from_croissant_crumbs_response(num_columns: int) -> N
[email protected](
+ "id_to_escape, ids, expected_id",
+ [
+ ("valid_id", {"other", "other2"}, "valid_id"),
+ ("id with spaces", set(), "id_with_spaces"),
+ ("a/b/c", set(), "a_b_c"),
+ ("a/b/c", {"a_b_c"}, "a_b_c_0"),
+ ("a/b/c", {"a_b_c", "a_b_c_0"}, "a_b_c_0_0"),
+ ("a@#$b", set(), "a___b"),
+ ("", set(), ""),
+ ("", {""}, "_0"),
+ ],
+)
+def test_escape_ids(id_to_escape: str, ids: set[str], expected_id: str) -> None:
+ """Tests the expected_id function with various inputs."""
+ assert escape_ids(id_to_escape, ids=ids.copy()) == expected_id
+
+
@@ -45,0 +65 @@ def test_truncate_features_from_croissant_crumbs_response(num_columns: int) -> N
+ ("feature with spaces", "['feature with spaces']"),
@@ -104 +124 @@ def test_escape_jsonpath_key(feature_name: str, expected_output: str) -> None:
- "transform": [{"jsonPath": "sub-field"}, {"jsonPath": "sub-sub-field"}],
+ "transform": [{"jsonPath": "['sub-field']"}, {"jsonPath": "['sub-sub-field']"}],
@@ -118 +138,3 @@ def test_feature_to_croissant_field(hf_datasets_feature: Any, croissant_field: A
- feature_to_croissant_field("distribution_name", "field_name", "column_name", hf_datasets_feature)
+ feature_to_croissant_field(
+ "distribution_name", "field_name", "column_name", hf_datasets_feature, existing_ids=set()
+ )
diff --git a/services/worker/src/worker/job_runners/dataset/croissant_crumbs.py b/services/worker/src/worker/job_runners/dataset/croissant_crumbs.py
index 7b8b2b1e..4bb911a5 100644
--- a/services/worker/src/worker/job_runners/dataset/croissant_crumbs.py
+++ b/services/worker/src/worker/job_runners/dataset/croissant_crumbs.py
@@ -12 +12 @@ from libcommon.constants import CROISSANT_MAX_CONFIGS
-from libcommon.croissant_utils import feature_to_croissant_field, get_record_set
+from libcommon.croissant_utils import escape_ids, feature_to_croissant_field, get_record_set
@@ -21,22 +20,0 @@ from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner
-NAME_PATTERN_REGEX = "[^a-zA-Z0-9\\-_\\.]"
-
-
-def _escape_name(name: str, names: set[str]) -> str:
- """Escapes names and IDs in Croissant.
-
- Reasons:
- - `/` are used in the syntax as delimiters. So we replace them.
- - Two FileObject/FileSet/RecordSet/Fields cannot have the same ID. So we append a postfix in case it happens.
-
- Args:
- name: The initial non-escaped name.
- names: The set of already existing names.
- Returns:
- `str`: The escaped name.
- """
- escaped_name = re.sub(NAME_PATTERN_REGEX, "_", name)
- while escaped_name in names:
- escaped_name = f"{escaped_name}_0"
- names.add(escaped_name)
- return escaped_name
-
@@ -58 +36 @@ def get_croissant_crumbs_from_dataset_infos(
- names: set[str] = set(repo_name)
+ ids: set[str] = set(repo_name)
@@ -79 +57 @@ def get_croissant_crumbs_from_dataset_infos(
- distribution_name = _escape_name(f"parquet-files-for-config-{config}", names)
+ distribution_name = escape_ids(f"parquet-files-for-config-{config}", ids)
@@ -93 +71 @@ def get_croissant_crumbs_from_dataset_infos(
- record_set_name = _escape_name(record_set_name, names)
+ record_set_name = escape_ids(record_set_name, ids)
@@ -134,2 +112,2 @@ def get_croissant_crumbs_from_dataset_infos(
- field_name = f"{record_set_name}/{_escape_name(column, fields_names)}"
- field = feature_to_croissant_field(distribution_name, field_name, column, feature)
+ field_name = f"{record_set_name}/{escape_ids(column, fields_names)}"
+ field = feature_to_croissant_field(distribution_name, field_name, column, feature, ids)
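
The new helper is self-contained, so its behavior can be checked in isolation; this sketch reproduces it verbatim from the diff, with checks adapted from the parametrized tests:

```python
import re

NAME_PATTERN_REGEX = "[^a-zA-Z0-9\\-_\\.]"

def escape_ids(id_to_escape: str, ids: set[str]) -> str:
    # Replace "/" and any other disallowed character with "_", then append
    # "_0" until the result no longer collides with an existing ID.
    escaped_id = re.sub(NAME_PATTERN_REGEX, "_", id_to_escape)
    while escaped_id in ids:
        escaped_id = f"{escaped_id}_0"
    ids.add(escaped_id)
    return escaped_id

seen: set[str] = {"a_b_c"}
assert escape_ids("a/b/c", seen) == "a_b_c_0"  # collision -> postfix
assert escape_ids("id with spaces", seen) == "id_with_spaces"
```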

9680713930ec0dcb755414debed0de1cdaf03ef1 | Arjun Jagdale | 2025-07-17T15:00:39 | refactor(tests): use HfApi.update_repo_settings to simplify gated dataset test setup (#3206)

diff --git a/jobs/cache_maintenance/tests/utils.py b/jobs/cache_maintenance/tests/utils.py
index 78ccf145..9c97dd19 100644
--- a/jobs/cache_maintenance/tests/utils.py
+++ b/jobs/cache_maintenance/tests/utils.py
@@ -8 +8 @@ from pathlib import Path
-from typing import Any, Optional, Union
+from typing import Literal, Optional, cast
@@ -10,0 +11 @@ import requests
+from huggingface_hub import HfApi
@@ -14,2 +14,0 @@ from huggingface_hub.constants import (
- REPO_TYPES,
- REPO_TYPES_URL_PREFIXES,
@@ -17,2 +15,0 @@ from huggingface_hub.constants import (
-from huggingface_hub.hf_api import HfApi
-from huggingface_hub.utils import hf_raise_for_status
@@ -41,68 +37,0 @@ def get_default_config_split() -> tuple[str, str]:
-def update_repo_settings(
- *,
- repo_id: str,
- private: Optional[bool] = None,
- gated: Optional[str] = None,
- token: Optional[str] = None,
- organization: Optional[str] = None,
- repo_type: Optional[str] = None,
- name: Optional[str] = None,
-) -> Any:
- """Update the settings of a repository.
- Args:
- repo_id (`str`, *optional*):
- A namespace (user or an organization) and a repo name separated
- by a `/`.
- <Tip>
- Version added: 0.5
- </Tip>
- private (`bool`, *optional*):
- Whether the repo should be private.
- gated (`str`, *optional*):
- Whether the repo should request user access.
- Possible values are 'auto' and 'manual'
- token (`str`, *optional*):
- An authentication token (See https://huggingface.co/settings/token)
- repo_type (`str`, *optional*):
- Set to `"dataset"` or `"space"` if uploading to a dataset or
- space, `None` or `"model"` if uploading to a model.
-
- Raises:
- [~`huggingface_hub.utils.RepositoryNotFoundError`]:
- If the repository to download from cannot be found. This may be because it doesn't exist,
- or because it is set to `private` and you do not have access.
-
- Returns:
- `Any`: The HTTP response in json.
- """
- if repo_type not in REPO_TYPES:
- raise ValueError("Invalid repo type")
-
- organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
-
- if organization is None:
- namespace = hf_api.whoami(token)["name"]
- else:
- namespace = organization
-
- path_prefix = f"{hf_api.endpoint}/api/"
- if repo_type in REPO_TYPES_URL_PREFIXES:
- path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
-
- path = f"{path_prefix}{namespace}/{name}/settings"
-
- json: dict[str, Union[bool, str]] = {}
- if private is not None:
- json["private"] = private
- if gated is not None:
- json["gated"] = gated
-
- r = requests.put(
- path,
- headers={"authorization": f"Bearer {token}"},
- json=json,
- )
- hf_raise_for_status(r)
- return r.json()
-
-
@@ -120 +49,6 @@ def create_empty_hub_dataset_repo(
- update_repo_settings(repo_id=repo_id, token=CI_USER_TOKEN, gated=gated, repo_type=DATASET)
+ HfApi(endpoint=CI_HUB_ENDPOINT).update_repo_settings(
+ repo_id=repo_id,
+ token=CI_USER_TOKEN,
+ gated=cast(Literal["auto", "manual", False], gated),
+ repo_type=DATASET,
+ )
diff --git a/services/admin/tests/fixtures/hub.py b/services/admin/tests/fixtures/hub.py
index 9ad7a675..4f52abb4 100644
--- a/services/admin/tests/fixtures/hub.py
+++ b/services/admin/tests/fixtures/hub.py
@@ -9 +9 @@ from contextlib import contextmanager, suppress
-from typing import Any, Literal, Optional, TypedDict, Union
+from typing import Literal, Optional, TypedDict, cast
@@ -13,3 +13 @@ import requests
-from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
-from huggingface_hub.hf_api import HfApi
-from huggingface_hub.utils import hf_raise_for_status
+from huggingface_hub import HfApi
@@ -24,69 +21,0 @@ CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
-def update_repo_settings(
- hf_api: HfApi,
- repo_id: str,
- *,
- private: Optional[bool] = None,
- gated: Optional[str] = None,
- token: Optional[str] = None,
- organization: Optional[str] = None,
- repo_type: Optional[str] = None,
- name: Optional[str] = None,
-) -> Any:
- """Update the settings of a repository.
- Args:
- repo_id (`str`, *optional*):
- A namespace (user or an organization) and a repo name separated
- by a `/`.
- <Tip>
- Version added: 0.5
- </Tip>
- private (`bool`, *optional*):
- Whether the repo should be private.
- gated (`str`, *optional*):
- Whether the repo should request user access.
- Possible values are 'auto' and 'manual'
- token (`str`, *optional*):
- An authentication token (See https://huggingface.co/settings/token)
- repo_type (`str`, *optional*):
- Set to `"dataset"` or `"space"` if uploading to a dataset or
- space, `None` or `"model"` if uploading to a model.
-
- Raises:
- [~`huggingface_hub.utils.RepositoryNotFoundError`]:
- If the repository to download from cannot be found. This may be because it doesn't exist,
- or because it is set to `private` and you do not have access.
-
- Returns:
- `Any`: The HTTP response in json.
- """
- if repo_type not in REPO_TYPES:
- raise ValueError("Invalid repo type")
-
- organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
-
- if organization is None:
- namespace = hf_api.whoami(token=token)["name"]
- else:
- namespace = organization
-
- path_prefix = f"{hf_api.endpoint}/api/"
- if repo_type in REPO_TYPES_URL_PREFIXES:
- path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
-
- path = f"{path_prefix}{namespace}/{name}/settings"
-
- json: dict[str, Union[bool, str]] = {}
- if private is not None:
- json["private"] = private
- if gated is not None:
- json["gated"] = gated
-
- r = requests.put(
- path,
- headers={"authorization": f"Bearer {token}"},
- json=json,
- )
- hf_raise_for_status(r)
- return r.json()
-
-
@@ -145 +74,6 @@ def create_hf_dataset_repo(
- update_repo_settings(hf_api, repo_id, token=hf_token, gated=gated, repo_type="dataset")
+ hf_api.update_repo_settings(
+ repo_id=repo_id,
+ token=hf_token,
+ gated=cast(Literal["auto", "manual", False], gated),
+ repo_type="dataset",
+ )
diff --git a/services/worker/tests/fixtures/hub.py b/services/worker/tests/fixtures/hub.py
index c9f9fe5a..45ceacec 100644
--- a/services/worker/tests/fixtures/hub.py
+++ b/services/worker/tests/fixtures/hub.py
@@ -11 +11 @@ from pathlib import Path
-from typing import Any, Literal, Optional, TypedDict, Union
+from typing import Any, Literal, Optional, TypedDict, cast
@@ -16,3 +16 @@ from datasets import Dataset, Features, Value
-from huggingface_hub.constants import REPO_TYPES, REPO_TYPES_URL_PREFIXES
-from huggingface_hub.hf_api import HfApi
-from huggingface_hub.utils import hf_raise_for_status
+from huggingface_hub import HfApi
@@ -35,69 +32,0 @@ def get_default_config_split() -> tuple[str, str]:
-def update_repo_settings(
- *,
- repo_id: str,
- private: Optional[bool] = None,
- gated: Optional[str] = None,
- token: Optional[str] = None,
- organization: Optional[str] = None,
- repo_type: Optional[str] = None,
- name: Optional[str] = None,
-) -> Any:
- """Update the settings of a repository.
-
- Args:
- repo_id (`str`, *optional*):
- A namespace (user or an organization) and a repo name separated
- by a `/`.
- <Tip>
- Version added: 0.5
- </Tip>
- private (`bool`, *optional*):
- Whether the repo should be private.
- gated (`str`, *optional*):
- Whether the repo should request user access.
- Possible values are 'auto' and 'manual'
- token (`str`, *optional*):
- An authentication token (See https://huggingface.co/settings/token)
- repo_type (`str`, *optional*):
- Set to `"dataset"` or `"space"` if uploading to a dataset or
- space, `None` or `"model"` if uploading to a model.
-
- Raises:
- [~`huggingface_hub.utils.RepositoryNotFoundError`]:
- If the repository to download from cannot be found. This may be because it doesn't exist,
- or because it is set to `private` and you do not have access.
-
- Returns:
- `Any`: The HTTP response in json.
- """
- if repo_type not in REPO_TYPES:
- raise ValueError("Invalid repo type")
-
- organization, name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
-
- if organization is None:
- namespace = hf_api.whoami(token)["name"]
- else:
- namespace = organization
-
- path_prefix = f"{hf_api.endpoint}/api/"
- if repo_type in REPO_TYPES_URL_PREFIXES:
- path_prefix += REPO_TYPES_URL_PREFIXES[repo_type]
-
- path = f"{path_prefix}{namespace}/{name}/settings"
-
- json: dict[str, Union[bool, str]] = {}
- if private is not None:
- json["private"] = private
- if gated is not None:
- json["gated"] = gated
-
- r = requests.put(
- path,
- headers={"authorization": f"Bearer {token}"},
- json=json,
- )
- hf_raise_for_status(r)
- return r.json()
-
-
@@ -127,0 +57 @@ def create_hub_dataset_repo(
+
@@ -129 +59,7 @@ def create_hub_dataset_repo(
- update_repo_settings(repo_id=repo_id, token=CI_USER_TOKEN, gated=gated, repo_type=DATASET)
+ HfApi(endpoint=CI_HUB_ENDPOINT).update_repo_settings(
+ repo_id=repo_id,
+ token=CI_USER_TOKEN,
+ gated=cast(Literal["auto", "manual", False], gated),
+ repo_type=DATASET,
+ )
+
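
The replacement relies on `HfApi.update_repo_settings` from `huggingface_hub`, which covers the same PUT-to-`/settings` logic the deleted helpers reimplemented by hand. A minimal usage sketch with hypothetical repo and token values:

```python
from huggingface_hub import HfApi

# Hypothetical values; the call shape mirrors the call sites in this commit.
api = HfApi(endpoint="https://hub-ci.huggingface.co")
api.update_repo_settings(
    repo_id="ci-user/my-dataset",
    token="hf_xxx",        # stands in for CI_USER_TOKEN
    gated="auto",          # "auto", "manual", or False
    repo_type="dataset",
)
```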

640f1f3c923543d8f6c5bebb185c45d79f00ff48 | Quentin Lhoest | 2025-07-15T14:17:39 | bump datasets for json (#3222)

diff --git a/front/admin_ui/poetry.lock b/front/admin_ui/poetry.lock
index fa10d964..3774e774 100644
--- a/front/admin_ui/poetry.lock
+++ b/front/admin_ui/poetry.lock
@@ -695,2 +695,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1536 +1536 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/jobs/cache_maintenance/poetry.lock b/jobs/cache_maintenance/poetry.lock
index 1aeb29a5..b16ded80 100644
--- a/jobs/cache_maintenance/poetry.lock
+++ b/jobs/cache_maintenance/poetry.lock
@@ -627,2 +627,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1213 +1213 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/jobs/mongodb_migration/poetry.lock b/jobs/mongodb_migration/poetry.lock
index aa0f31f9..effd4f82 100644
--- a/jobs/mongodb_migration/poetry.lock
+++ b/jobs/mongodb_migration/poetry.lock
@@ -627,2 +627,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1213 +1213 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/libs/libapi/poetry.lock b/libs/libapi/poetry.lock
index 89931b71..8b3a63bd 100644
--- a/libs/libapi/poetry.lock
+++ b/libs/libapi/poetry.lock
@@ -617,2 +617,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1227 +1227 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/libs/libcommon/poetry.lock b/libs/libcommon/poetry.lock
index 62f3c019..70c98c80 100644
--- a/libs/libcommon/poetry.lock
+++ b/libs/libcommon/poetry.lock
@@ -653,2 +653,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -4159 +4159 @@ python-versions = "3.9.18"
-content-hash = "8acfdbeec70806a00f966dbe44aefd78ba26fb0ad8a9418bef5c89545ff8d36d"
+content-hash = "60fe3d4c81ea146fca20eed29f519967db250f7e252aa1da37ed2e3cc3db6a24"
diff --git a/libs/libcommon/pyproject.toml b/libs/libcommon/pyproject.toml
index c93891c1..1c3de4a8 100644
--- a/libs/libcommon/pyproject.toml
+++ b/libs/libcommon/pyproject.toml
@@ -12 +12 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/admin/poetry.lock b/services/admin/poetry.lock
index 73a3665c..7bace3f3 100644
--- a/services/admin/poetry.lock
+++ b/services/admin/poetry.lock
@@ -642,2 +642,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1309 +1309 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/api/poetry.lock b/services/api/poetry.lock
index 38a0a608..e0bd0b7a 100644
--- a/services/api/poetry.lock
+++ b/services/api/poetry.lock
@@ -642,2 +642,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1329 +1329 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/rows/poetry.lock b/services/rows/poetry.lock
index dd3eb8d0..6e38dfe6 100644
--- a/services/rows/poetry.lock
+++ b/services/rows/poetry.lock
@@ -650,2 +650,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1297 +1297 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/search/poetry.lock b/services/search/poetry.lock
index d5f52cbe..2e0f44fe 100644
--- a/services/search/poetry.lock
+++ b/services/search/poetry.lock
@@ -629,2 +629,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1280 +1280 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/sse-api/poetry.lock b/services/sse-api/poetry.lock
index db5a416c..f46c3e3b 100644
--- a/services/sse-api/poetry.lock
+++ b/services/sse-api/poetry.lock
@@ -642,2 +642,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1356 +1356 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
diff --git a/services/webhook/poetry.lock b/services/webhook/poetry.lock
index 0d675879..e5685e11 100644
--- a/services/webhook/poetry.lock
+++ b/services/webhook/poetry.lock
@@ -585 +585 @@ name = "datasets"
-version = "4.0.0.dev0"
+version = "4.0.1.dev0"
@@ -606,0 +607 @@ soundfile = {version = ">=0.12.1", optional = true, markers = "extra == \"audio\
+torch = {version = ">=2.7.0", optional = true, markers = "extra == \"audio\""}
@@ -612 +613 @@ xxhash = "*"
-audio = ["soundfile (>=0.12.1)", "torchcodec (>=0.4.0)"]
+audio = ["soundfile (>=0.12.1)", "torch (>=2.7.0)", "torchcodec (>=0.4.0)"]
@@ -629,2 +630,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1200 +1200,0 @@ files = [
-markers = {main = "sys_platform == \"linux\" or sys_platform == \"darwin\""}
@@ -1276 +1276 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}
@@ -1449 +1448,0 @@ files = [
-markers = {main = "sys_platform == \"linux\" or sys_platform == \"darwin\""}
@@ -1597 +1595,0 @@ groups = ["main"]
-markers = "sys_platform == \"linux\" or sys_platform == \"darwin\""
@@ -3125 +3122,0 @@ groups = ["main"]
-markers = "sys_platform == \"linux\" or sys_platform == \"darwin\""
@@ -3216 +3213 @@ groups = ["main"]
-markers = "sys_platform == \"linux\" and platform_machine == \"aarch64\""
+markers = "sys_platform != \"darwin\" and platform_machine == \"aarch64\" or sys_platform != \"linux\" and sys_platform != \"darwin\""
diff --git a/services/worker/poetry.lock b/services/worker/poetry.lock
index 56e309ea..efd08801 100644
--- a/services/worker/poetry.lock
+++ b/services/worker/poetry.lock
@@ -950,2 +950,2 @@ url = "https://github.com/huggingface/datasets.git"
-reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
-resolved_reference = "c4bdfe84586d3789a9db9cde06e1f054043d5569"
+reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
+resolved_reference = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44"
@@ -1679 +1679 @@ cryptography = "^43.0.1"
-datasets = {git = "https://github.com/huggingface/datasets.git", rev = "c4bdfe84586d3789a9db9cde06e1f054043d5569", extras = ["audio", "vision"]}
+datasets = {git = "https://github.com/huggingface/datasets.git", rev = "8a4384dc9484ae9c3100f0fc594cd7773a5b8b44", extras = ["audio", "vision"]}

7f717292da41f8ac1cd8e2890f0890d670b7562a | Quentin Lhoest | 2025-07-15T14:15:27 | Allow search to download xet files (#3221)

diff --git a/chart/templates/_env/_envDatasetsBased.tpl b/chart/templates/_env/_envDatasetsBased.tpl
deleted file mode 100644
index 5ff79854..00000000
--- a/chart/templates/_env/_envDatasetsBased.tpl
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright 2022 The HuggingFace Authors.
-
-{{- define "envDatasetsBased" -}}
-# the size should remain so small that we don't need to worry about putting it on an external storage
-# note that the /tmp directory is not shared among the pods
-- name: HF_MODULES_CACHE
- value: "/tmp/modules-cache"
-- name: HF_DATASETS_TRUST_REMOTE_CODE
- value: "0"
-{{- end -}}
-
diff --git a/chart/templates/services/search/_container.tpl b/chart/templates/services/search/_container.tpl
index 5d9f8f05..a24f1a9a 100644
--- a/chart/templates/services/search/_container.tpl
+++ b/chart/templates/services/search/_container.tpl
@@ -18,0 +19,3 @@
+ - name: HF_HOME
+ value: "/tmp/hf"
+ # ^ensure the temporary files are created in /tmp, which is writable
diff --git a/chart/templates/worker/_container.tpl b/chart/templates/worker/_container.tpl
index 3064be80..191d8b6a 100644
--- a/chart/templates/worker/_container.tpl
+++ b/chart/templates/worker/_container.tpl
@@ -13 +12,0 @@
- {{ include "envDatasetsBased" . | nindent 2 }}
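
Setting `HF_HOME` matters because `huggingface_hub` resolves its cache root from that variable at import time, so temporary download files land under the writable `/tmp` as the inline comment in the chart says. A quick sanity check, assuming `huggingface_hub` is installed:

```python
import os

os.environ["HF_HOME"] = "/tmp/hf"  # must be set before huggingface_hub is imported

from huggingface_hub import constants

# constants.HF_HOME is resolved from the environment at import time
print(constants.HF_HOME)  # /tmp/hf
```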

49d78b9cdd7dc38ea398ff624a9b2eebbe32b4af | Arjun Dinesh Jagdale | 2025-07-11T18:31:36 | test: add unit tests for get_previous_step_or_raise (#1908) (#3218)

diff --git a/libs/libcommon/tests/test_simple_cache.py b/libs/libcommon/tests/test_simple_cache.py
index 11ad0d32..9434fc21 100644
--- a/libs/libcommon/tests/test_simple_cache.py
+++ b/libs/libcommon/tests/test_simple_cache.py
@@ -31,0 +32 @@ from libcommon.simple_cache import (
+ get_previous_step_or_raise,
@@ -58 +59,3 @@ from .utils import (
-def cache_mongo_resource_autouse(cache_mongo_resource: CacheMongoResource) -> CacheMongoResource:
+def cache_mongo_resource_autouse(
+ cache_mongo_resource: CacheMongoResource,
+) -> CacheMongoResource:
@@ -75 +78,5 @@ def test_insert_null_values() -> None:
- kind=kind, dataset=dataset_a, dataset_git_revision=dataset_git_revision_a, config=config, split=split
+ kind=kind,
+ dataset=dataset_a,
+ dataset_git_revision=dataset_git_revision_a,
+ config=config,
+ split=split,
@@ -261 +268,7 @@ def test_upsert_response_types() -> None:
- now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond // 1000 * 1000
+ now.year,
+ now.month,
+ now.day,
+ now.hour,
+ now.minute,
+ now.second,
+ now.microsecond // 1000 * 1000,
@@ -797 +810,2 @@ NAMES_RESPONSE_OK = ResponseSpec(
- content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]}, http_status=HTTPStatus.OK
+ content={NAMES_FIELD: [{NAME_FIELD: name} for name in NAMES]},
+ http_status=HTTPStatus.OK,
@@ -933,0 +948,64 @@ def test_get_datasets_with_last_updated_kind(entries: list[Entry], expected_data
+
+
+def test_get_previous_step_or_raise_success() -> None:
+ kind = CACHE_KIND
+ dataset = DATASET_NAME
+ config = "test_config"
+ split = "test_split"
+ content = {"key": "value"}
+
+ upsert_response(
+ kind=kind,
+ dataset=dataset,
+ dataset_git_revision=REVISION_NAME,
+ config=config,
+ split=split,
+ content=content,
+ http_status=HTTPStatus.OK,
+ )
+
+ try:
+ response = get_previous_step_or_raise(kind=kind, dataset=dataset, config=config, split=split)
+ assert response["http_status"] == HTTPStatus.OK
+ assert response["content"] == content
+ finally:
+ delete_response(kind=kind, dataset=dataset, config=config, split=split)
+
+
+def test_get_previous_step_or_raise_not_found() -> None:
+ kind = "missing_kind"
+ dataset = "missing_dataset"
+ config = "missing_config"
+ split = "missing_split"
+
+ delete_response(kind=kind, dataset=dataset, config=config, split=split)
+ with pytest.raises(CachedArtifactNotFoundError):
+ get_previous_step_or_raise(kind=kind, dataset=dataset, config=config, split=split)
+
+
+def test_get_previous_step_or_raise_error_status() -> None:
+ kind = CACHE_KIND
+ dataset = "error_dataset"
+ config = "error_config"
+ split = "error_split"
+ content = {"error": "failure"}
+
+ upsert_response(
+ kind=kind,
+ dataset=dataset,
+ dataset_git_revision=REVISION_NAME,
+ config=config,
+ split=split,
+ content=content,
+ http_status=HTTPStatus.INTERNAL_SERVER_ERROR,
+ error_code="some_error",
+ details={"error": "failure"},
+ )
+
+ try:
+ with pytest.raises(CachedArtifactError) as exc_info:
+ get_previous_step_or_raise(kind=kind, dataset=dataset, config=config, split=split)
+ assert exc_info.value.cache_entry_with_details["http_status"] == HTTPStatus.INTERNAL_SERVER_ERROR
+ assert exc_info.value.cache_entry_with_details["content"] == content
+ finally:
+ delete_response(kind=kind, dataset=dataset, config=config, split=split)
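
The three tests pin down the function's contract: a cached `HTTPStatus.OK` entry is returned, a missing entry raises `CachedArtifactNotFoundError`, and an entry stored with an error status raises `CachedArtifactError`. A typical call site might look like this (the cache kind and dataset names are hypothetical):

```python
from http import HTTPStatus

from libcommon.simple_cache import get_previous_step_or_raise

# Raises CachedArtifactNotFoundError / CachedArtifactError on the failure paths.
response = get_previous_step_or_raise(
    kind="config-split-names",  # hypothetical cache kind
    dataset="user/dataset",
    config="default",
    split="train",
)
assert response["http_status"] == HTTPStatus.OK
content = response["content"]  # payload written by the previous step
```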

7fa92d676a9986694e82b19d21b7db5d92054df3 | Arjun Dinesh Jagdale | 2025-07-11T13:40:42 | refactor(config): replace get_empty_str_list with CONSTANT.copy in ParquetAndInfoConfig (#1522) (#3219)

diff --git a/libs/libcommon/src/libcommon/orchestrator.py b/libs/libcommon/src/libcommon/orchestrator.py
index aa8c9039..b21d4738 100644
--- a/libs/libcommon/src/libcommon/orchestrator.py
+++ b/libs/libcommon/src/libcommon/orchestrator.py
@@ -124,0 +125,3 @@ class Task(ABC):
+DEFAULT_JOB_INFOS: list[JobInfo] = []
+
+
@@ -127 +130 @@ class CreateJobsTask(Task):
- job_infos: list[JobInfo] = field(default_factory=list)
+ job_infos: list[JobInfo] = field(default_factory=DEFAULT_JOB_INFOS.copy)
@@ -250 +253,3 @@ class DeleteDatasetParquetRefBranchTask(Task):
- repo_id=self.dataset, branch="refs/convert/parquet", repo_type="dataset"
+ repo_id=self.dataset,
+ branch="refs/convert/parquet",
+ repo_type="dataset",
@@ -280 +285,3 @@ class DeleteDatasetDuckdbRefBranchTask(Task):
- repo_id=self.dataset, branch="refs/convert/duckdb", repo_type="dataset"
+ repo_id=self.dataset,
+ branch="refs/convert/duckdb",
+ repo_type="dataset",
@@ -311 +318,3 @@ class UpdateRevisionOfDatasetCacheEntriesTask(Task):
- dataset=self.dataset, old_revision=self.old_revision, new_revision=self.new_revision
+ dataset=self.dataset,
+ old_revision=self.old_revision,
+ new_revision=self.new_revision,
@@ -693 +702,4 @@ class DatasetBackfillPlan(Plan):
- self, processing_step: ProcessingStep, config: Optional[str] = None, split: Optional[str] = None
+ self,
+ processing_step: ProcessingStep,
+ config: Optional[str] = None,
+ split: Optional[str] = None,
@@ -846 +858,4 @@ class DatasetBackfillPlan(Plan):
- difficulty = min(DEFAULT_DIFFICULTY_MAX, difficulty + failed_runs * DIFFICULTY_BONUS_BY_FAILED_RUNS)
+ difficulty = min(
+ DEFAULT_DIFFICULTY_MAX,
+ difficulty + failed_runs * DIFFICULTY_BONUS_BY_FAILED_RUNS,
+ )
@@ -963 +978,3 @@ class SmartDatasetUpdatePlan(Plan):
- dataset=self.dataset, old_revision=self.old_revision, new_revision=self.revision
+ dataset=self.dataset,
+ old_revision=self.old_revision,
+ new_revision=self.revision,
@@ -1007 +1024,5 @@ class SmartDatasetUpdatePlan(Plan):
- f"datasets/{self.dataset}/README.md", revision=self.revision, mode="r", newline="", encoding="utf-8"
+ f"datasets/{self.dataset}/README.md",
+ revision=self.revision,
+ mode="r",
+ newline="",
+ encoding="utf-8",
@@ -1063 +1084,3 @@ def remove_dataset(
- dataset: str, storage_clients: Optional[list[StorageClient]] = None, committer_hf_token: Optional[str] = None
+ dataset: str,
+ storage_clients: Optional[list[StorageClient]] = None,
+ committer_hf_token: Optional[str] = None,
@@ -1076 +1099,5 @@ def remove_dataset(
- plan = DatasetRemovalPlan(dataset=dataset, storage_clients=storage_clients, committer_hf_token=committer_hf_token)
+ plan = DatasetRemovalPlan(
+ dataset=dataset,
+ storage_clients=storage_clients,
+ committer_hf_token=committer_hf_token,
+ )
@@ -1236 +1263,4 @@ def finish_job(
- kind=processing_step.cache_kind, dataset=params["dataset"], config=params["config"], split=params["split"]
+ kind=processing_step.cache_kind,
+ dataset=params["dataset"],
+ config=params["config"],
+ split=params["split"],
@@ -1276 +1306,3 @@ def has_pending_ancestor_jobs(
- dataset: str, processing_step_name: str, processing_graph: ProcessingGraph = processing_graph
+ dataset: str,
+ processing_step_name: str,
+ processing_graph: ProcessingGraph = processing_graph,
diff --git a/services/worker/src/worker/config.py b/services/worker/src/worker/config.py
index dd888d54..3116279a 100644
--- a/services/worker/src/worker/config.py
+++ b/services/worker/src/worker/config.py
@@ -56,4 +55,0 @@ WORKER_STATE_FILE_PATH = None
-def get_empty_str_list() -> list[str]:
- return []
-
-
@@ -84 +80,2 @@ class WorkerConfig:
- name="HEARTBEAT_INTERVAL_SECONDS", default=WORKER_HEARTBEAT_INTERVAL_SECONDS
+ name="HEARTBEAT_INTERVAL_SECONDS",
+ default=WORKER_HEARTBEAT_INTERVAL_SECONDS,
@@ -87 +84,2 @@ class WorkerConfig:
- name="KILL_LONG_JOB_INTERVAL_SECONDS", default=WORKER_KILL_LONG_JOB_INTERVAL_SECONDS
+ name="KILL_LONG_JOB_INTERVAL_SECONDS",
+ default=WORKER_KILL_LONG_JOB_INTERVAL_SECONDS,
@@ -90 +88,2 @@ class WorkerConfig:
- name="KILL_ZOMBIES_INTERVAL_SECONDS", default=WORKER_KILL_ZOMBIES_INTERVAL_SECONDS
+ name="KILL_ZOMBIES_INTERVAL_SECONDS",
+ default=WORKER_KILL_ZOMBIES_INTERVAL_SECONDS,
@@ -93 +92,2 @@ class WorkerConfig:
- name="MAX_JOB_DURATION_SECONDS", default=WORKER_MAX_JOB_DURATION_SECONDS
+ name="MAX_JOB_DURATION_SECONDS",
+ default=WORKER_MAX_JOB_DURATION_SECONDS,
@@ -170 +170,4 @@ class OptInOutUrlsScanConfig:
- columns_max_number=env.int(name="COLUMNS_MAX_NUMBER", default=OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER),
+ columns_max_number=env.int(
+ name="COLUMNS_MAX_NUMBER",
+ default=OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER,
+ ),
@@ -172 +175,2 @@ class OptInOutUrlsScanConfig:
- name="MAX_CONCURRENT_REQUESTS_NUMBER", default=OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER
+ name="MAX_CONCURRENT_REQUESTS_NUMBER",
+ default=OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER,
@@ -175 +179,2 @@ class OptInOutUrlsScanConfig:
- name="MAX_REQUESTS_PER_SECOND", default=OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND
+ name="MAX_REQUESTS_PER_SECOND",
+ default=OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND,
@@ -181 +186,2 @@ class OptInOutUrlsScanConfig:
- name="URLS_NUMBER_PER_BATCH", default=OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH
+ name="URLS_NUMBER_PER_BATCH",
+ default=OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH,
@@ -203 +209,10 @@ class PresidioEntitiesScanConfig:
- name="COLUMNS_MAX_NUMBER", default=PRESIDIO_ENTITIES_SCAN_COLUMNS_MAX_NUMBER
+ name="COLUMNS_MAX_NUMBER",
+ default=PRESIDIO_ENTITIES_SCAN_COLUMNS_MAX_NUMBER,
+ ),
+ max_text_length=env.int(
+ name="MAX_TEXT_LENGTH",
+ default=PRESIDIO_ENTITIES_SCAN_MAX_TEXT_LENGTH,
+ ),
+ rows_max_number=env.int(
+ name="ROWS_MAX_NUMBER",
+ default=PRESIDIO_ENTITIES_SCAN_ROWS_MAX_NUMBER,
@@ -205,2 +219,0 @@ class PresidioEntitiesScanConfig:
- max_text_length=env.int(name="MAX_TEXT_LENGTH", default=PRESIDIO_ENTITIES_SCAN_MAX_TEXT_LENGTH),
- rows_max_number=env.int(name="ROWS_MAX_NUMBER", default=PRESIDIO_ENTITIES_SCAN_ROWS_MAX_NUMBER),
@@ -216,0 +230 @@ PARQUET_AND_INFO_URL_TEMPLATE = "/datasets/%s/resolve/%s/%s"
+PARQUET_AND_INFO_FULLY_CONVERTED_DATASETS: list[str] = []
@@ -226,0 +241 @@ class ParquetAndInfoConfig:
+ fully_converted_datasets: list[str] = field(default_factory=PARQUET_AND_INFO_FULLY_CONVERTED_DATASETS.copy)
@@ -235 +250,2 @@ class ParquetAndInfoConfig:
- name="MAX_DATASET_SIZE_BYTES", default=PARQUET_AND_INFO_MAX_DATASET_SIZE_BYTES
+ name="MAX_DATASET_SIZE_BYTES",
+ default=PARQUET_AND_INFO_MAX_DATASET_SIZE_BYTES,
@@ -238 +254,2 @@ class ParquetAndInfoConfig:
- name="MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY", default=PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY
+ name="MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY",
+ default=PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY,
@@ -305 +322,4 @@ class DescriptiveStatisticsConfig:
- parquet_revision = env.str(name="PARQUET_AND_INFO_TARGET_REVISION", default=PARQUET_AND_INFO_TARGET_REVISION)
+ parquet_revision = env.str(
+ name="PARQUET_AND_INFO_TARGET_REVISION",
+ default=PARQUET_AND_INFO_TARGET_REVISION,
+ )
@@ -308 +328,4 @@ class DescriptiveStatisticsConfig:
- cache_directory=env.str(name="CACHE_DIRECTORY", default=DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY),
+ cache_directory=env.str(
+ name="CACHE_DIRECTORY",
+ default=DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY,
+ ),
@@ -311 +334,2 @@ class DescriptiveStatisticsConfig:
- name="MAX_SPLIT_SIZE_BYTES", default=DESCRIPTIVE_STATISTICS_MAX_SPLIT_SIZE_BYTES
+ name="MAX_SPLIT_SIZE_BYTES",
+ default=DESCRIPTIVE_STATISTICS_MAX_SPLIT_SIZE_BYTES,
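
The `CONSTANT.copy` pattern keeps the empty-list default next to a named module-level constant while still giving every instance its own list, because a bound `list.copy` is a zero-argument callable that returns a fresh copy. A minimal sketch with hypothetical names:

```python
from dataclasses import dataclass, field

DEFAULT_TAGS: list[str] = []  # analogous to DEFAULT_JOB_INFOS above

@dataclass
class Job:
    # Equivalent to default_factory=list, but the default is a named constant.
    tags: list[str] = field(default_factory=DEFAULT_TAGS.copy)

a, b = Job(), Job()
a.tags.append("x")
assert b.tags == []  # no shared mutable default between instances
```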

bae070f2cbc3d2e74f1e4f9fa89afd2131b197b1 | Quentin Lhoest | 2025-07-09T11:42:18 | worker loop timeout (#3216)

diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
index ae798233..a66b3376 100644
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -76 +76 @@ class WorkerExecutor:
- return OutputExecutor(start_worker_loop_command, banner, timeout=20)
+ return OutputExecutor(start_worker_loop_command, banner, timeout=60)

a8ae77a6980456bb1f37c2c76da1b8c578d6e694 | Quentin Lhoest | 2025-07-09T11:31:05 | fix fixed length list (#3215)

diff --git a/services/worker/src/worker/job_runners/config/parquet_and_info.py b/services/worker/src/worker/job_runners/config/parquet_and_info.py
index cdb63553..362cf266 100644
--- a/services/worker/src/worker/job_runners/config/parquet_and_info.py
+++ b/services/worker/src/worker/job_runners/config/parquet_and_info.py
@@ -1271 +1271,2 @@ def backward_compat_features(
- return [backward_compat_features(features_dict["feature"])]
+ if "length" not in features_dict or int(features_dict["length"]) == -1:
+ return [backward_compat_features(features_dict["feature"])]
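
The guard means only variable-length lists (no `length` key, or `length == -1`) are rewritten into a plain Python list of the inner feature; fixed-length lists now fall through and keep their declared length. A simplified sketch of the branch (hypothetical helper name; the real `backward_compat_features` recurses over a whole features dict):

```python
from typing import Any

def compat_list(features_dict: dict[str, Any]) -> Any:
    # Variable-length List -> plain Python list of the inner feature.
    if "length" not in features_dict or int(features_dict["length"]) == -1:
        return [features_dict["feature"]]
    # Fixed-length List: keep the dict form so the length survives.
    return features_dict

assert compat_list({"feature": "int32"}) == ["int32"]
assert compat_list({"feature": "int32", "length": 4})["length"] == 4
```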