| text (string, lengths 31–361k) | type (string, 6 classes) | start (int64, 125–418k) | end (int64, 325–418k) | depth (int64, 0–8) | filepath (string, 103 classes) | parent_class (string, 106 classes) |
---|---|---|---|---|---|---|
if hyperparameters is not None:
model_card += "\n## Training procedure\n"
model_card += "\n### Training hyperparameters\n"
model_card += "\nThe following hyperparameters were used during training:\n\n"
model_card += hyperparameters
model_card += "\n" | if_statement | 4,353 | 4,643 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if plot_model and os.path.exists(f"{repo_dir}/model.png"):
model_card += "\n ## Model Plot\n"
model_card += "\n<details>"
model_card += "\n<summary>View Model Plot</summary>\n"
path_to_plot = "./model.png"
model_card += f"\n![Model Image]({path_to_plot})\n"
model_card += "\n</details>" | if_statement | 4,648 | 4,982 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
def save_pretrained_keras(
model,
save_directory: Union[str, Path],
config: Optional[Dict[str, Any]] = None,
include_optimizer: bool = False,
plot_model: bool = True,
tags: Optional[Union[list, str]] = None,
**model_save_kwargs,
):
"""
Saves a Keras model to save_directory in SavedModel format. Use this if
you're using the Functional or Sequential APIs.
Args:
model (`Keras.Model`):
The [Keras
model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
you'd like to save. The model must be compiled and built.
save_directory (`str` or `Path`):
Specify directory in which you want to save the Keras model.
config (`dict`, *optional*):
Configuration object to be saved alongside the model weights.
include_optimizer (`bool`, *optional*, defaults to `False`):
Whether or not to include optimizer in serialization.
plot_model (`bool`, *optional*, defaults to `True`):
Setting this to `True` will plot the model and put it in the model
card. Requires graphviz and pydot to be installed.
tags (Union[`str`, `list`], *optional*):
List of tags related to the model, or a single tag as a string. See example tags
[here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1).
model_save_kwargs (`dict`, *optional*):
model_save_kwargs will be passed to
[`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).
"""
if keras is None:
raise ImportError("Called a Tensorflow-specific function but could not import it.")
if not model.built:
raise ValueError("Model should be built before trying to save")
save_directory = Path(save_directory)
save_directory.mkdir(parents=True, exist_ok=True)
# saving config
if config:
if not isinstance(config, dict):
raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'")
with (save_directory / constants.CONFIG_NAME).open("w") as f:
json.dump(config, f)
metadata = {}
if isinstance(tags, list):
metadata["tags"] = tags
elif isinstance(tags, str):
metadata["tags"] = [tags]
task_name = model_save_kwargs.pop("task_name", None)
if task_name is not None:
warnings.warn(
"`task_name` input argument is deprecated. Pass `tags` instead.",
FutureWarning,
)
if "tags" in metadata:
metadata["tags"].append(task_name)
else:
metadata["tags"] = [task_name]
if model.history is not None:
if model.history.history != {}:
path = save_directory / "history.json"
if path.exists():
warnings.warn(
"`history.json` file already exists, it will be overwritten by the history of this version.",
UserWarning,
)
with path.open("w", encoding="utf-8") as f:
json.dump(model.history.history, f, indent=2, sort_keys=True)
_create_model_card(model, save_directory, plot_model, metadata)
keras.models.save_model(model, save_directory, include_optimizer=include_optimizer, **model_save_kwargs) | function_definition | 5,050 | 8,454 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
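A minimal usage sketch for `save_pretrained_keras`, assuming TensorFlow and huggingface_hub are installed; the directory name, config values and tags below are placeholders.

```python
import tensorflow as tf

from huggingface_hub import save_pretrained_keras

# The model must be compiled and built before saving.
model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
model.compile(optimizer="adam", loss="mse")
model.build(input_shape=(None, 4))

save_pretrained_keras(
    model,
    "my-keras-model",            # local save directory (placeholder)
    config={"num_units": 2},     # saved as config.json next to the weights
    include_optimizer=False,
    tags=["keras", "demo"],
)
```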
if keras is None:
raise ImportError("Called a Tensorflow-specific function but could not import it.") | if_statement | 6,681 | 6,790 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if not model.built:
raise ValueError("Model should be built before trying to save") | if_statement | 6,796 | 6,887 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if config:
if not isinstance(config, dict):
raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'")
with (save_directory / constants.CONFIG_NAME).open("w") as f:
json.dump(config, f) | if_statement | 7,010 | 7,281 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if not isinstance(config, dict):
raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'") | if_statement | 7,029 | 7,177 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if isinstance(tags, list):
metadata["tags"] = tags
elif isinstance(tags, str):
metadata["tags"] = [tags] | if_statement | 7,305 | 7,429 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if task_name is not None:
warnings.warn(
"`task_name` input argument is deprecated. Pass `tags` instead.",
FutureWarning,
)
if "tags" in metadata:
metadata["tags"].append(task_name)
else:
metadata["tags"] = [task_name] | if_statement | 7,492 | 7,790 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if "tags" in metadata:
metadata["tags"].append(task_name)
else:
metadata["tags"] = [task_name] | if_statement | 7,664 | 7,790 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if model.history is not None:
if model.history.history != {}:
path = save_directory / "history.json"
if path.exists():
warnings.warn(
"`history.json` file already exists, it will be overwritten by the history of this version.",
UserWarning,
)
with path.open("w", encoding="utf-8") as f:
json.dump(model.history.history, f, indent=2, sort_keys=True) | if_statement | 7,796 | 8,276 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if model.history.history != {}:
path = save_directory / "history.json"
if path.exists():
warnings.warn(
"`history.json` file already exists, it will be overwritten by the history of this version.",
UserWarning,
)
with path.open("w", encoding="utf-8") as f:
json.dump(model.history.history, f, indent=2, sort_keys=True) | if_statement | 7,834 | 8,276 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
if path.exists():
warnings.warn(
"`history.json` file already exists, it will be overwritten by the history of this version.",
UserWarning,
) | if_statement | 7,929 | 8,142 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
def from_pretrained_keras(*args, **kwargs) -> "KerasModelHubMixin":
r"""
Instantiate a pretrained Keras model from the Hub.
The model is expected to be in `SavedModel` format.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a
model repo on huggingface.co. Valid model ids can be located
at the root-level, like `bert-base-uncased`, or namespaced
under a user or organization name, like
`dbmdz/bert-base-german-cased`.
- You can pin a `revision` by appending `@` to the model id, e.g.
`dbmdz/bert-base-german-cased@main`. The revision is the specific
model version to use: a branch name, a tag name, or a commit id.
Since we use a git-based system for storing models and other
artifacts on huggingface.co, `revision` can be any identifier
allowed by git.
- A path to a `directory` containing model weights saved using
[`~transformers.PreTrainedModel.save_pretrained`], e.g.,
`./my_model_directory/`.
- `None` if you are both providing the configuration and state
dictionary (resp. with keyword arguments `config` and
`state_dict`).
force_download (`bool`, *optional*, defaults to `False`):
Whether to force the (re-)download of the model weights and
configuration files, overriding the cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The
proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If
`True`, will use the token generated when running `huggingface-cli
login` (stored in `~/.huggingface`).
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory in which a downloaded pretrained model
configuration should be cached if the standard cache should not be
used.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only look at local files (i.e., do not try to download
the model).
model_kwargs (`Dict`, *optional*):
model_kwargs will be passed to the model during initialization
<Tip>
Passing `token=True` is required when you want to use a private
model.
</Tip>
"""
return KerasModelHubMixin.from_pretrained(*args, **kwargs) | function_definition | 8,457 | 11,375 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
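A minimal sketch of loading a model back with `from_pretrained_keras`; the repo id and local path are placeholders.

```python
from huggingface_hub import from_pretrained_keras

# From a repo on the Hub (SavedModel format expected) ...
model = from_pretrained_keras("username/my-keras-model")

# ... or from a local directory created by `save_pretrained_keras`.
local_model = from_pretrained_keras("./my-keras-model")
```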
def push_to_hub_keras(
model,
repo_id: str,
*,
config: Optional[dict] = None,
commit_message: str = "Push Keras model using huggingface_hub.",
private: Optional[bool] = None,
api_endpoint: Optional[str] = None,
token: Optional[str] = None,
branch: Optional[str] = None,
create_pr: Optional[bool] = None,
allow_patterns: Optional[Union[List[str], str]] = None,
ignore_patterns: Optional[Union[List[str], str]] = None,
delete_patterns: Optional[Union[List[str], str]] = None,
log_dir: Optional[str] = None,
include_optimizer: bool = False,
tags: Optional[Union[list, str]] = None,
plot_model: bool = True,
**model_save_kwargs,
):
"""
Upload model checkpoint to the Hub.
Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use
`delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more
details.
Args:
model (`Keras.Model`):
The [Keras model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) you'd like to push to the
Hub. The model must be compiled and built.
repo_id (`str`):
ID of the repository to push to (example: `"username/my-model"`).
commit_message (`str`, *optional*, defaults to "Push Keras model using huggingface_hub."):
Message to commit while pushing.
private (`bool`, *optional*):
Whether the repository created should be private.
If `None` (default), the repo will be public unless the organization's default is private.
api_endpoint (`str`, *optional*):
The API endpoint to use when pushing the model to the hub.
token (`str`, *optional*):
The token to use as HTTP bearer authorization for remote files. If
not set, will use the token set when logging in with
`huggingface-cli login` (stored in `~/.huggingface`).
branch (`str`, *optional*):
The git branch on which to push the model. This defaults to
the default branch as specified in your repository, which
defaults to `"main"`.
create_pr (`bool`, *optional*):
Whether or not to create a Pull Request from `branch` with that commit.
Defaults to `False`.
config (`dict`, *optional*):
Configuration object to be saved alongside the model weights.
allow_patterns (`List[str]` or `str`, *optional*):
If provided, only files matching at least one pattern are pushed.
ignore_patterns (`List[str]` or `str`, *optional*):
If provided, files matching any of the patterns are not pushed.
delete_patterns (`List[str]` or `str`, *optional*):
If provided, remote files matching any of the patterns will be deleted from the repo.
log_dir (`str`, *optional*):
TensorBoard logging directory to be pushed. The Hub automatically
hosts and displays a TensorBoard instance if log files are included
in the repository.
include_optimizer (`bool`, *optional*, defaults to `False`):
Whether or not to include optimizer during serialization.
tags (Union[`list`, `str`], *optional*):
List of tags related to the model, or a single tag as a string. See example tags
[here](https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1).
plot_model (`bool`, *optional*, defaults to `True`):
Setting this to `True` will plot the model and put it in the model
card. Requires graphviz and pydot to be installed.
model_save_kwargs (`dict`, *optional*):
model_save_kwargs will be passed to
[`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).
Returns:
The url of the commit of your model in the given repository.
"""
api = HfApi(endpoint=api_endpoint)
repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id
# Push the files to the repo in a single commit
with SoftTemporaryDirectory() as tmp:
saved_path = Path(tmp) / repo_id
save_pretrained_keras(
model,
saved_path,
config=config,
include_optimizer=include_optimizer,
tags=tags,
plot_model=plot_model,
**model_save_kwargs,
)
# If `log_dir` provided, delete remote logs and upload new ones
if log_dir is not None:
delete_patterns = (
[]
if delete_patterns is None
else (
[delete_patterns] # convert `delete_patterns` to a list
if isinstance(delete_patterns, str)
else delete_patterns
)
)
delete_patterns.append("logs/*")
copytree(log_dir, saved_path / "logs")
return api.upload_folder(
repo_type="model",
repo_id=repo_id,
folder_path=saved_path,
commit_message=commit_message,
token=token,
revision=branch,
create_pr=create_pr,
allow_patterns=allow_patterns,
ignore_patterns=ignore_patterns,
delete_patterns=delete_patterns,
) | function_definition | 11,425 | 16,890 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
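A hedged sketch of pushing a compiled and built Keras model to the Hub with `push_to_hub_keras`; the repo id, config values, tags and log directory are placeholders.

```python
import tensorflow as tf

from huggingface_hub import push_to_hub_keras

model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
model.compile(optimizer="adam", loss="mse")
model.build(input_shape=(None, 4))

push_to_hub_keras(
    model,
    repo_id="username/my-keras-model",
    config={"num_units": 2},
    include_optimizer=False,
    tags=["keras", "demo"],
    log_dir="./logs",   # TensorBoard logs are uploaded and rendered by the Hub
)
```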
if log_dir is not None:
delete_patterns = (
[]
if delete_patterns is None
else (
[delete_patterns] # convert `delete_patterns` to a list
if isinstance(delete_patterns, str)
else delete_patterns
)
)
delete_patterns.append("logs/*")
copytree(log_dir, saved_path / "logs") | if_statement | 16,044 | 16,486 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
class KerasModelHubMixin(ModelHubMixin):
"""
Implementation of [`ModelHubMixin`] to provide model Hub upload/download
capabilities to Keras models.
```python
>>> import tensorflow as tf
>>> from huggingface_hub import KerasModelHubMixin
>>> class MyModel(tf.keras.Model, KerasModelHubMixin):
... def __init__(self, **kwargs):
... super().__init__()
... self.config = kwargs.pop("config", None)
... self.dummy_inputs = ...
... self.layer = ...
... def call(self, *args):
... return ...
>>> # Initialize and compile the model as you normally would
>>> model = MyModel()
>>> model.compile(...)
>>> # Build the graph by training it or passing dummy inputs
>>> _ = model(model.dummy_inputs)
>>> # Save model weights to local directory
>>> model.save_pretrained("my-awesome-model")
>>> # Push model weights to the Hub
>>> model.push_to_hub("my-awesome-model")
>>> # Download and initialize weights from the Hub
>>> model = MyModel.from_pretrained("username/super-cool-model")
```
"""
def _save_pretrained(self, save_directory):
save_pretrained_keras(self, save_directory)
@classmethod
def _from_pretrained(
cls,
model_id,
revision,
cache_dir,
force_download,
proxies,
resume_download,
local_files_only,
token,
config: Optional[Dict[str, Any]] = None,
**model_kwargs,
):
"""Here we just call [`from_pretrained_keras`] function so both the mixin and
functional APIs stay in sync.
TODO - Some args above aren't used since we are calling
snapshot_download instead of hf_hub_download.
"""
if keras is None:
raise ImportError("Called a TensorFlow-specific function but could not import it.")
# Root is either a local filepath matching model_id or a cached snapshot
if not os.path.isdir(model_id):
storage_folder = snapshot_download(
repo_id=model_id,
revision=revision,
cache_dir=cache_dir,
library_name="keras",
library_version=get_tf_version(),
)
else:
storage_folder = model_id
# TODO: change this in a future PR. We are not returning a KerasModelHubMixin instance here...
model = keras.models.load_model(storage_folder)
# For now, we add a new attribute, config, to store the config loaded from the hub/a local dir.
model.config = config
return model | class_definition | 16,893 | 19,573 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | null |
def _save_pretrained(self, save_directory):
save_pretrained_keras(self, save_directory) | function_definition | 18,039 | 18,134 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | KerasModelHubMixin |
def _from_pretrained(
cls,
model_id,
revision,
cache_dir,
force_download,
proxies,
resume_download,
local_files_only,
token,
config: Optional[Dict[str, Any]] = None,
**model_kwargs,
):
"""Here we just call [`from_pretrained_keras`] function so both the mixin and
functional APIs stay in sync.
TODO - Some args above aren't used since we are calling
snapshot_download instead of hf_hub_download.
"""
if keras is None:
raise ImportError("Called a TensorFlow-specific function but could not import it.")
# Root is either a local filepath matching model_id or a cached snapshot
if not os.path.isdir(model_id):
storage_folder = snapshot_download(
repo_id=model_id,
revision=revision,
cache_dir=cache_dir,
library_name="keras",
library_version=get_tf_version(),
)
else:
storage_folder = model_id
# TODO: change this in a future PR. We are not returning a KerasModelHubMixin instance here...
model = keras.models.load_model(storage_folder)
# For now, we add a new attribute, config, to store the config loaded from the hub/a local dir.
model.config = config
return model | function_definition | 18,157 | 19,573 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | KerasModelHubMixin |
if keras is None:
raise ImportError("Called a TensorFlow-specific function but could not import it.") | if_statement | 18,713 | 18,826 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | KerasModelHubMixin |
if not os.path.isdir(model_id):
storage_folder = snapshot_download(
repo_id=model_id,
revision=revision,
cache_dir=cache_dir,
library_name="keras",
library_version=get_tf_version(),
)
else:
storage_folder = model_id | if_statement | 18,917 | 19,256 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/keras_mixin.py | KerasModelHubMixin |
def _is_true(value: Optional[str]) -> bool:
if value is None:
return False
return value.upper() in ENV_VARS_TRUE_VALUES | function_definition | 239 | 374 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/constants.py | null |
if value is None:
return False | if_statement | 287 | 325 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/constants.py | null |
def _as_int(value: Optional[str]) -> Optional[int]:
if value is None:
return None
return int(value) | function_definition | 377 | 492 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/constants.py | null |
if value is None:
return None | if_statement | 433 | 470 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/constants.py | null |
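A small sketch of how these helpers are typically used to parse environment variables; the variable names below are illustrative, not taken from this file.

```python
import os

# Illustrative env vars: _is_true maps a raw string to a bool,
# _as_int maps it to an int (or None when unset).
offline_mode = _is_true(os.environ.get("HF_HUB_OFFLINE"))            # -> bool
request_timeout = _as_int(os.environ.get("HF_HUB_REQUEST_TIMEOUT"))  # -> int or None
```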
if _staging_mode:
# In staging mode, we use a different cache to ensure we don't mix up production and staging data or tokens
_staging_home = os.path.join(os.path.expanduser("~"), ".cache", "huggingface_staging")
HUGGINGFACE_HUB_CACHE = os.path.join(_staging_home, "hub")
_OLD_HF_TOKEN_PATH = os.path.join(_staging_home, "_old_token")
HF_TOKEN_PATH = os.path.join(_staging_home, "token") | if_statement | 4,911 | 5,318 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/constants.py | null |
class _FileToUpload:
"""Temporary dataclass to store info about files to upload. Not meant to be used directly."""
local_path: Path
path_in_repo: str
size_limit: int
last_modified: float | class_definition | 461 | 668 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | null |
class CommitScheduler:
"""
Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).
The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
with the `stop` method. Checkout the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
to learn more about how to use it.
Args:
repo_id (`str`):
The id of the repo to commit to.
folder_path (`str` or `Path`):
Path to the local folder to upload regularly.
every (`int` or `float`, *optional*):
The number of minutes between each commit. Defaults to 5 minutes.
path_in_repo (`str`, *optional*):
Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
of the repository.
repo_type (`str`, *optional*):
The type of the repo to commit to. Defaults to `model`.
revision (`str`, *optional*):
The revision of the repo to commit to. Defaults to `main`.
private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
token (`str`, *optional*):
The token to use to commit to the repo. Defaults to the token saved on the machine.
allow_patterns (`List[str]` or `str`, *optional*):
If provided, only files matching at least one pattern are uploaded.
ignore_patterns (`List[str]` or `str`, *optional*):
If provided, files matching any of the patterns are not uploaded.
squash_history (`bool`, *optional*):
Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
useful to avoid degraded performance on the repo when it grows too large.
hf_api (`HfApi`, *optional*):
The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
Example:
```py
>>> from pathlib import Path
>>> from huggingface_hub import CommitScheduler
# Scheduler uploads every 10 minutes
>>> csv_path = Path("watched_folder/data.csv")
>>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)
>>> with csv_path.open("a") as f:
... f.write("first line")
# Some time later (...)
>>> with csv_path.open("a") as f:
... f.write("second line")
```
Example using a context manager:
```py
>>> from pathlib import Path
>>> from huggingface_hub import CommitScheduler
>>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
... csv_path = Path("watched_folder/data.csv")
... with csv_path.open("a") as f:
... f.write("first line")
... (...)
... with csv_path.open("a") as f:
... f.write("second line")
# Scheduler is now stopped and the last commit has been triggered
```
"""
def __init__(
self,
*,
repo_id: str,
folder_path: Union[str, Path],
every: Union[int, float] = 5,
path_in_repo: Optional[str] = None,
repo_type: Optional[str] = None,
revision: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
allow_patterns: Optional[Union[List[str], str]] = None,
ignore_patterns: Optional[Union[List[str], str]] = None,
squash_history: bool = False,
hf_api: Optional["HfApi"] = None,
) -> None:
self.api = hf_api or HfApi(token=token)
# Folder
self.folder_path = Path(folder_path).expanduser().resolve()
self.path_in_repo = path_in_repo or ""
self.allow_patterns = allow_patterns
if ignore_patterns is None:
ignore_patterns = []
elif isinstance(ignore_patterns, str):
ignore_patterns = [ignore_patterns]
self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS
if self.folder_path.is_file():
raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.")
self.folder_path.mkdir(parents=True, exist_ok=True)
# Repository
repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True)
self.repo_id = repo_url.repo_id
self.repo_type = repo_type
self.revision = revision
self.token = token
# Keep track of already uploaded files
self.last_uploaded: Dict[Path, float] = {} # key is local path, value is timestamp
# Scheduler
if not every > 0:
raise ValueError(f"'every' must be a positive integer, not '{every}'.")
self.lock = Lock()
self.every = every
self.squash_history = squash_history
logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.")
self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
self._scheduler_thread.start()
atexit.register(self._push_to_hub)
self.__stopped = False
def stop(self) -> None:
"""Stop the scheduler.
A stopped scheduler cannot be restarted. Mostly for testing purposes.
"""
self.__stopped = True
def __enter__(self) -> "CommitScheduler":
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
# Upload last changes before exiting
self.trigger().result()
self.stop()
return
def _run_scheduler(self) -> None:
"""Dumb thread waiting between each scheduled push to Hub."""
while True:
self.last_future = self.trigger()
time.sleep(self.every * 60)
if self.__stopped:
break
def trigger(self) -> Future:
"""Trigger a `push_to_hub` and return a future.
This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
immediately, without waiting for the next scheduled commit.
"""
return self.api.run_as_future(self._push_to_hub)
def _push_to_hub(self) -> Optional[CommitInfo]:
if self.__stopped: # If stopped, already scheduled commits are ignored
return None
logger.info("(Background) scheduled commit triggered.")
try:
value = self.push_to_hub()
if self.squash_history:
logger.info("(Background) squashing repo history.")
self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
return value
except Exception as e:
logger.error(f"Error while pushing to Hub: {e}") # Depending on the setup, error might be silenced
raise
def push_to_hub(self) -> Optional[CommitInfo]:
"""
Push folder to the Hub and return the commit info.
<Tip warning={true}>
This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
issues.
</Tip>
The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
uploads only changed files. If no changes are found, the method returns without committing anything. If you want
to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
for example to compress data together in a single file before committing. For more details and examples, check
out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
"""
# Check files to upload (with lock)
with self.lock:
logger.debug("Listing files to upload for scheduled commit.")
# List files from folder (taken from `_prepare_upload_folder_additions`)
relpath_to_abspath = {
path.relative_to(self.folder_path).as_posix(): path
for path in sorted(self.folder_path.glob("**/*")) # sorted to be deterministic
if path.is_file()
}
prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""
# Filter with pattern + filter out unchanged files + retrieve current file size
files_to_upload: List[_FileToUpload] = []
for relpath in filter_repo_objects(
relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
):
local_path = relpath_to_abspath[relpath]
stat = local_path.stat()
if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
files_to_upload.append(
_FileToUpload(
local_path=local_path,
path_in_repo=prefix + relpath,
size_limit=stat.st_size,
last_modified=stat.st_mtime,
)
)
# Return if nothing to upload
if len(files_to_upload) == 0:
logger.debug("Dropping schedule commit: no changed file to upload.")
return None
# Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
logger.debug("Removing unchanged files since previous scheduled commit.")
add_operations = [
CommitOperationAdd(
# Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit),
path_in_repo=file_to_upload.path_in_repo,
)
for file_to_upload in files_to_upload
]
# Upload files (append mode expected - no need for lock)
logger.debug("Uploading files for scheduled commit.")
commit_info = self.api.create_commit(
repo_id=self.repo_id,
repo_type=self.repo_type,
operations=add_operations,
commit_message="Scheduled Commit",
revision=self.revision,
)
# Successful commit: keep track of the latest "last_modified" for each file
for file in files_to_upload:
self.last_uploaded[file.local_path] = file.last_modified
return commit_info | class_definition | 671 | 11,802 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | null |
def __init__(
self,
*,
repo_id: str,
folder_path: Union[str, Path],
every: Union[int, float] = 5,
path_in_repo: Optional[str] = None,
repo_type: Optional[str] = None,
revision: Optional[str] = None,
private: Optional[bool] = None,
token: Optional[str] = None,
allow_patterns: Optional[Union[List[str], str]] = None,
ignore_patterns: Optional[Union[List[str], str]] = None,
squash_history: bool = False,
hf_api: Optional["HfApi"] = None,
) -> None:
self.api = hf_api or HfApi(token=token)
# Folder
self.folder_path = Path(folder_path).expanduser().resolve()
self.path_in_repo = path_in_repo or ""
self.allow_patterns = allow_patterns
if ignore_patterns is None:
ignore_patterns = []
elif isinstance(ignore_patterns, str):
ignore_patterns = [ignore_patterns]
self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS
if self.folder_path.is_file():
raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.")
self.folder_path.mkdir(parents=True, exist_ok=True)
# Repository
repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True)
self.repo_id = repo_url.repo_id
self.repo_type = repo_type
self.revision = revision
self.token = token
# Keep track of already uploaded files
self.last_uploaded: Dict[Path, float] = {} # key is local path, value is timestamp
# Scheduler
if not every > 0:
raise ValueError(f"'every' must be a positive integer, not '{every}'.")
self.lock = Lock()
self.every = every
self.squash_history = squash_history
logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.")
self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
self._scheduler_thread.start()
atexit.register(self._push_to_hub)
self.__stopped = False | function_definition | 4,041 | 6,218 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if ignore_patterns is None:
ignore_patterns = []
elif isinstance(ignore_patterns, str):
ignore_patterns = [ignore_patterns] | if_statement | 4,840 | 4,995 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if self.folder_path.is_file():
raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.") | if_statement | 5,078 | 5,210 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if not every > 0:
raise ValueError(f"'every' must be a positive integer, not '{every}'.") | if_statement | 5,707 | 5,808 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def stop(self) -> None:
"""Stop the scheduler.
A stopped scheduler cannot be restarted. Mostly for testing purposes.
"""
self.__stopped = True | function_definition | 6,224 | 6,397 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def __enter__(self) -> "CommitScheduler":
return self | function_definition | 6,403 | 6,464 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def __exit__(self, exc_type, exc_value, traceback) -> None:
# Upload last changes before exiting
self.trigger().result()
self.stop()
return | function_definition | 6,470 | 6,641 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def _run_scheduler(self) -> None:
"""Dumb thread waiting between each scheduled push to Hub."""
while True:
self.last_future = self.trigger()
time.sleep(self.every * 60)
if self.__stopped:
break | function_definition | 6,647 | 6,909 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
while True:
self.last_future = self.trigger()
time.sleep(self.every * 60)
if self.__stopped:
break | while_statement | 6,759 | 6,909 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if self.__stopped:
break | if_statement | 6,869 | 6,909 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def trigger(self) -> Future:
"""Trigger a `push_to_hub` and return a future.
This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
immediately, without waiting for the next scheduled commit.
"""
return self.api.run_as_future(self._push_to_hub) | function_definition | 6,915 | 7,254 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
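A hedged usage sketch of triggering a commit immediately instead of waiting for the next scheduled run, as the docstring above describes; the repo id and folder path are placeholders.

```python
from huggingface_hub import CommitScheduler

scheduler = CommitScheduler(
    repo_id="username/test_scheduler",
    repo_type="dataset",
    folder_path="watched_folder",
    every=10,
)

future = scheduler.trigger()   # returns a Future (via HfApi.run_as_future)
future.result()                # block until the commit has been pushed
```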
def _push_to_hub(self) -> Optional[CommitInfo]:
if self.__stopped: # If stopped, already scheduled commits are ignored
return None
logger.info("(Background) scheduled commit triggered.")
try:
value = self.push_to_hub()
if self.squash_history:
logger.info("(Background) squashing repo history.")
self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
return value
except Exception as e:
logger.error(f"Error while pushing to Hub: {e}") # Depending on the setup, error might be silenced
raise | function_definition | 7,260 | 7,934 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if self.__stopped: # If stopped, already scheduled commits are ignored
return None | if_statement | 7,316 | 7,411 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
try:
value = self.push_to_hub()
if self.squash_history:
logger.info("(Background) squashing repo history.")
self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
return value
except Exception as e:
logger.error(f"Error while pushing to Hub: {e}") # Depending on the setup, error might be silenced
raise | try_statement | 7,485 | 7,934 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if self.squash_history:
logger.info("(Background) squashing repo history.")
self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision) | if_statement | 7,541 | 7,748 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
def push_to_hub(self) -> Optional[CommitInfo]:
"""
Push folder to the Hub and return the commit info.
<Tip warning={true}>
This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
issues.
</Tip>
The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
uploads only changed files. If no changes are found, the method returns without committing anything. If you want
to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
for example to compress data together in a single file before committing. For more details and examples, check
out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
"""
# Check files to upload (with lock)
with self.lock:
logger.debug("Listing files to upload for scheduled commit.")
# List files from folder (taken from `_prepare_upload_folder_additions`)
relpath_to_abspath = {
path.relative_to(self.folder_path).as_posix(): path
for path in sorted(self.folder_path.glob("**/*")) # sorted to be deterministic
if path.is_file()
}
prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""
# Filter with pattern + filter out unchanged files + retrieve current file size
files_to_upload: List[_FileToUpload] = []
for relpath in filter_repo_objects(
relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
):
local_path = relpath_to_abspath[relpath]
stat = local_path.stat()
if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
files_to_upload.append(
_FileToUpload(
local_path=local_path,
path_in_repo=prefix + relpath,
size_limit=stat.st_size,
last_modified=stat.st_mtime,
)
)
# Return if nothing to upload
if len(files_to_upload) == 0:
logger.debug("Dropping schedule commit: no changed file to upload.")
return None
# Convert `_FileToUpload` as `CommitOperationAdd` (=> compute file shas + limit to file size)
logger.debug("Removing unchanged files since previous scheduled commit.")
add_operations = [
CommitOperationAdd(
# Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit),
path_in_repo=file_to_upload.path_in_repo,
)
for file_to_upload in files_to_upload
]
# Upload files (append mode expected - no need for lock)
logger.debug("Uploading files for scheduled commit.")
commit_info = self.api.create_commit(
repo_id=self.repo_id,
repo_type=self.repo_type,
operations=add_operations,
commit_message="Scheduled Commit",
revision=self.revision,
)
# Successful commit: keep track of the latest "last_modified" for each file
for file in files_to_upload:
self.last_uploaded[file.local_path] = file.last_modified
return commit_info | function_definition | 7,940 | 11,802 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
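A hedged sketch of the subclassing pattern mentioned in the docstring above: override `push_to_hub` to upload a single compressed archive instead of the raw files. The subclass name, archive name and compression format are illustrative choices, not from the source.

```python
import tarfile
import tempfile
from pathlib import Path

from huggingface_hub import CommitScheduler


class CompressingScheduler(CommitScheduler):
    def push_to_hub(self):
        # Compress the whole watched folder into one tar.gz, then upload only that file.
        with tempfile.TemporaryDirectory() as tmp:
            archive = Path(tmp) / "data.tar.gz"
            with tarfile.open(archive, "w:gz") as tar:
                tar.add(self.folder_path, arcname=".")
            return self.api.upload_file(
                path_or_fileobj=archive,
                path_in_repo="data.tar.gz",
                repo_id=self.repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
                commit_message="Scheduled Commit",
            )
```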
for relpath in filter_repo_objects(
relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
):
local_path = relpath_to_abspath[relpath]
stat = local_path.stat()
if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
files_to_upload.append(
_FileToUpload(
local_path=local_path,
path_in_repo=prefix + relpath,
size_limit=stat.st_size,
last_modified=stat.st_mtime,
)
) | for_statement | 9,682 | 10,411 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
files_to_upload.append(
_FileToUpload(
local_path=local_path,
path_in_repo=prefix + relpath,
size_limit=stat.st_size,
last_modified=stat.st_mtime,
)
) | if_statement | 9,963 | 10,411 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
if len(files_to_upload) == 0:
logger.debug("Dropping schedule commit: no changed file to upload.")
return None | if_statement | 10,459 | 10,593 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
for file in files_to_upload:
self.last_uploaded[file.local_path] = file.last_modified | for_statement | 11,678 | 11,775 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | CommitScheduler |
class PartialFileIO(BytesIO):
"""A file-like object that reads only the first part of a file.
Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
file is uploaded (i.e. the part that was available when the filesystem was first scanned).
In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
disturbance for the user. The object is passed to `CommitOperationAdd`.
Only supports `read`, `tell` and `seek` methods.
Args:
file_path (`str` or `Path`):
Path to the file to read.
size_limit (`int`):
The maximum number of bytes to read from the file. If the file is larger than this, only the first part
will be read (and uploaded).
"""
def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
self._file_path = Path(file_path)
self._file = self._file_path.open("rb")
self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)
def __del__(self) -> None:
self._file.close()
return super().__del__()
def __repr__(self) -> str:
return f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
def __len__(self) -> int:
return self._size_limit
def __getattribute__(self, name: str):
if name.startswith("_") or name in ("read", "tell", "seek"): # only 3 public methods supported
return super().__getattribute__(name)
raise NotImplementedError(f"PartialFileIO does not support '{name}'.")
def tell(self) -> int:
"""Return the current file position."""
return self._file.tell()
def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
"""Change the stream position to the given offset.
Behavior is the same as a regular file, except that the position is capped to the size limit.
"""
if __whence == SEEK_END:
# SEEK_END => set from the truncated end
__offset = len(self) + __offset
__whence = SEEK_SET
pos = self._file.seek(__offset, __whence)
if pos > self._size_limit:
return self._file.seek(self._size_limit)
return pos
def read(self, __size: Optional[int] = -1) -> bytes:
"""Read at most `__size` bytes from the file.
Behavior is the same as a regular file, except that it is capped to the size limit.
"""
current = self._file.tell()
if __size is None or __size < 0:
# Read until file limit
truncated_size = self._size_limit - current
else:
# Read until file limit or __size
truncated_size = min(__size, self._size_limit - current)
return self._file.read(truncated_size) | class_definition | 11,805 | 14,678 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | null |
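A minimal sketch of how `PartialFileIO` caps reads at the size observed when the folder was scanned; the file path and size limit below are placeholders, and the import path assumes the internal module shown in the filepath column.

```python
from huggingface_hub._commit_scheduler import PartialFileIO

capped = PartialFileIO("watched_folder/data.csv", size_limit=1024)
print(len(capped))    # at most 1024 bytes, even if the file keeps growing
data = capped.read()  # never returns bytes past the size limit
```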
def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
self._file_path = Path(file_path)
self._file = self._file_path.open("rb")
self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size) | function_definition | 12,643 | 12,888 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def __del__(self) -> None:
self._file.close()
return super().__del__() | function_definition | 12,894 | 12,980 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def __repr__(self) -> str:
return f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>" | function_definition | 12,986 | 13,104 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def __len__(self) -> int:
return self._size_limit | function_definition | 13,110 | 13,167 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def __getattribute__(self, name: str):
if name.startswith("_") or name in ("read", "tell", "seek"): # only 3 public methods supported
return super().__getattribute__(name)
raise NotImplementedError(f"PartialFileIO does not support '{name}'.") | function_definition | 13,173 | 13,444 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
if name.startswith("_") or name in ("read", "tell", "seek"): # only 3 public methods supported
return super().__getattribute__(name) | if_statement | 13,220 | 13,365 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def tell(self) -> int:
"""Return the current file position."""
return self._file.tell() | function_definition | 13,450 | 13,553 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
"""Change the stream position to the given offset.
Behavior is the same as a regular file, except that the position is capped to the size limit.
"""
if __whence == SEEK_END:
# SEEK_END => set from the truncated end
__offset = len(self) + __offset
__whence = SEEK_SET
pos = self._file.seek(__offset, __whence)
if pos > self._size_limit:
return self._file.seek(self._size_limit)
return pos | function_definition | 13,559 | 14,116 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
if __whence == SEEK_END:
# SEEK_END => set from the truncated end
__offset = len(self) + __offset
__whence = SEEK_SET | if_statement | 13,805 | 13,958 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
if pos > self._size_limit:
return self._file.seek(self._size_limit) | if_statement | 14,018 | 14,097 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def read(self, __size: Optional[int] = -1) -> bytes:
"""Read at most `__size` bytes from the file.
Behavior is the same as a regular file, except that it is capped to the size limit.
"""
current = self._file.tell()
if __size is None or __size < 0:
# Read until file limit
truncated_size = self._size_limit - current
else:
# Read until file limit or __size
truncated_size = min(__size, self._size_limit - current)
return self._file.read(truncated_size) | function_definition | 14,122 | 14,678 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
if __size is None or __size < 0:
# Read until file limit
truncated_size = self._size_limit - current
else:
# Read until file limit or __size
truncated_size = min(__size, self._size_limit - current) | if_statement | 14,378 | 14,631 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_scheduler.py | PartialFileIO |
def _attach(package_name, submodules=None, submod_attrs=None):
"""Attach lazily loaded submodules, functions, or other attributes.
Typically, modules import submodules and attributes as follows:
```py
import mysubmodule
import anothersubmodule
from .foo import someattr
```
The idea is to replace a package's `__getattr__`, `__dir__`, and
`__all__`, such that all imports work exactly the way they would
with normal imports, except that the import occurs upon first use.
The typical way to call this function, replacing the above imports, is:
```python
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
['mysubmodule', 'anothersubmodule'],
{'foo': ['someattr']}
)
```
This functionality requires Python 3.7 or higher.
Args:
package_name (`str`):
Typically use `__name__`.
submodules (`set`):
List of submodules to attach.
submod_attrs (`dict`):
Dictionary of submodule -> list of attributes / functions.
These attributes are imported as they are used.
Returns:
__getattr__, __dir__, __all__
"""
if submod_attrs is None:
submod_attrs = {}
if submodules is None:
submodules = set()
else:
submodules = set(submodules)
attr_to_modules = {attr: mod for mod, attrs in submod_attrs.items() for attr in attrs}
__all__ = list(submodules | attr_to_modules.keys())
def __getattr__(name):
if name in submodules:
try:
return importlib.import_module(f"{package_name}.{name}")
except Exception as e:
print(f"Error importing {package_name}.{name}: {e}")
raise
elif name in attr_to_modules:
submod_path = f"{package_name}.{attr_to_modules[name]}"
try:
submod = importlib.import_module(submod_path)
except Exception as e:
print(f"Error importing {submod_path}: {e}")
raise
attr = getattr(submod, name)
# If the attribute lives in a file (module) with the same
# name as the attribute, ensure that the attribute and *not*
# the module is accessible on the package.
if name == attr_to_modules[name]:
pkg = sys.modules[package_name]
pkg.__dict__[name] = attr
return attr
else:
raise AttributeError(f"No {package_name} attribute {name}")
def __dir__():
return __all__
return __getattr__, __dir__, list(__all__) | function_definition | 15,651 | 18,301 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
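Following the typical call shown in the docstring, a minimal sketch of how a package `__init__` might wire `_attach`; the submodule and attribute names are illustrative.

```python
# Nothing below is imported eagerly; the first attribute access triggers the
# real import via the generated __getattr__.
__getattr__, __dir__, __all__ = _attach(
    __name__,
    submodules=["inference"],
    submod_attrs={"hf_api": ["HfApi", "create_repo"]},
)

# e.g. `from huggingface_hub import HfApi` imports huggingface_hub.hf_api lazily.
```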
if submod_attrs is None:
submod_attrs = {} | if_statement | 16,844 | 16,894 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
if submodules is None:
submodules = set()
else:
submodules = set(submodules) | if_statement | 16,900 | 16,996 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
def __getattr__(name):
if name in submodules:
try:
return importlib.import_module(f"{package_name}.{name}")
except Exception as e:
print(f"Error importing {package_name}.{name}: {e}")
raise
elif name in attr_to_modules:
submod_path = f"{package_name}.{attr_to_modules[name]}"
try:
submod = importlib.import_module(submod_path)
except Exception as e:
print(f"Error importing {submod_path}: {e}")
raise
attr = getattr(submod, name)
# If the attribute lives in a file (module) with the same
# name as the attribute, ensure that the attribute and *not*
# the module is accessible on the package.
if name == attr_to_modules[name]:
pkg = sys.modules[package_name]
pkg.__dict__[name] = attr
return attr
else:
raise AttributeError(f"No {package_name} attribute {name}") | function_definition | 17,151 | 18,210 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
if name in submodules:
try:
return importlib.import_module(f"{package_name}.{name}")
except Exception as e:
print(f"Error importing {package_name}.{name}: {e}")
raise
elif name in attr_to_modules:
submod_path = f"{package_name}.{attr_to_modules[name]}"
try:
submod = importlib.import_module(submod_path)
except Exception as e:
print(f"Error importing {submod_path}: {e}")
raise
attr = getattr(submod, name)
# If the attribute lives in a file (module) with the same
# name as the attribute, ensure that the attribute and *not*
# the module is accessible on the package.
if name == attr_to_modules[name]:
pkg = sys.modules[package_name]
pkg.__dict__[name] = attr
return attr
else:
raise AttributeError(f"No {package_name} attribute {name}") | if_statement | 17,182 | 18,210 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
try:
return importlib.import_module(f"{package_name}.{name}")
except Exception as e:
print(f"Error importing {package_name}.{name}: {e}")
raise | try_statement | 17,217 | 17,420 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
try:
submod = importlib.import_module(submod_path)
except Exception as e:
print(f"Error importing {submod_path}: {e}")
raise | try_statement | 17,539 | 17,723 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
if name == attr_to_modules[name]:
pkg = sys.modules[package_name]
pkg.__dict__[name] = attr | if_statement | 17,976 | 18,099 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
def __dir__():
return __all__ | function_definition | 18,216 | 18,253 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
if os.environ.get("EAGER_IMPORT", ""):
for attr in __all__:
__getattr__(attr) | if_statement | 18,398 | 18,487 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
for attr in __all__:
__getattr__(attr) | for_statement | 18,441 | 18,487 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
if TYPE_CHECKING: # pragma: no cover
from ._commit_scheduler import CommitScheduler # noqa: F401
from ._inference_endpoints import (
InferenceEndpoint, # noqa: F401
InferenceEndpointError, # noqa: F401
InferenceEndpointStatus, # noqa: F401
InferenceEndpointTimeoutError, # noqa: F401
InferenceEndpointType, # noqa: F401
)
from ._login import (
auth_list, # noqa: F401
auth_switch, # noqa: F401
interpreter_login, # noqa: F401
login, # noqa: F401
logout, # noqa: F401
notebook_login, # noqa: F401
)
from ._snapshot_download import snapshot_download # noqa: F401
from ._space_api import (
SpaceHardware, # noqa: F401
SpaceRuntime, # noqa: F401
SpaceStage, # noqa: F401
SpaceStorage, # noqa: F401
SpaceVariable, # noqa: F401
)
from ._tensorboard_logger import HFSummaryWriter # noqa: F401
from ._webhooks_payload import (
WebhookPayload, # noqa: F401
WebhookPayloadComment, # noqa: F401
WebhookPayloadDiscussion, # noqa: F401
WebhookPayloadDiscussionChanges, # noqa: F401
WebhookPayloadEvent, # noqa: F401
WebhookPayloadMovedTo, # noqa: F401
WebhookPayloadRepo, # noqa: F401
WebhookPayloadUrl, # noqa: F401
WebhookPayloadWebhook, # noqa: F401
)
from ._webhooks_server import (
WebhooksServer, # noqa: F401
webhook_endpoint, # noqa: F401
)
from .community import (
Discussion, # noqa: F401
DiscussionComment, # noqa: F401
DiscussionCommit, # noqa: F401
DiscussionEvent, # noqa: F401
DiscussionStatusChange, # noqa: F401
DiscussionTitleChange, # noqa: F401
DiscussionWithDetails, # noqa: F401
)
from .constants import (
CONFIG_NAME, # noqa: F401
FLAX_WEIGHTS_NAME, # noqa: F401
HUGGINGFACE_CO_URL_HOME, # noqa: F401
HUGGINGFACE_CO_URL_TEMPLATE, # noqa: F401
PYTORCH_WEIGHTS_NAME, # noqa: F401
REPO_TYPE_DATASET, # noqa: F401
REPO_TYPE_MODEL, # noqa: F401
REPO_TYPE_SPACE, # noqa: F401
TF2_WEIGHTS_NAME, # noqa: F401
TF_WEIGHTS_NAME, # noqa: F401
)
from .fastai_utils import (
_save_pretrained_fastai, # noqa: F401
from_pretrained_fastai, # noqa: F401
push_to_hub_fastai, # noqa: F401
)
from .file_download import (
_CACHED_NO_EXIST, # noqa: F401
HfFileMetadata, # noqa: F401
get_hf_file_metadata, # noqa: F401
hf_hub_download, # noqa: F401
hf_hub_url, # noqa: F401
try_to_load_from_cache, # noqa: F401
)
from .hf_api import (
Collection, # noqa: F401
CollectionItem, # noqa: F401
CommitInfo, # noqa: F401
CommitOperation, # noqa: F401
CommitOperationAdd, # noqa: F401
CommitOperationCopy, # noqa: F401
CommitOperationDelete, # noqa: F401
DatasetInfo, # noqa: F401
GitCommitInfo, # noqa: F401
GitRefInfo, # noqa: F401
GitRefs, # noqa: F401
HfApi, # noqa: F401
ModelInfo, # noqa: F401
RepoUrl, # noqa: F401
SpaceInfo, # noqa: F401
User, # noqa: F401
UserLikes, # noqa: F401
WebhookInfo, # noqa: F401
WebhookWatchedItem, # noqa: F401
accept_access_request, # noqa: F401
add_collection_item, # noqa: F401
add_space_secret, # noqa: F401
add_space_variable, # noqa: F401
auth_check, # noqa: F401
cancel_access_request, # noqa: F401
change_discussion_status, # noqa: F401
comment_discussion, # noqa: F401
create_branch, # noqa: F401
create_collection, # noqa: F401
create_commit, # noqa: F401
create_discussion, # noqa: F401
create_inference_endpoint, # noqa: F401
create_pull_request, # noqa: F401
create_repo, # noqa: F401
create_tag, # noqa: F401
create_webhook, # noqa: F401
dataset_info, # noqa: F401
delete_branch, # noqa: F401
delete_collection, # noqa: F401
delete_collection_item, # noqa: F401
delete_file, # noqa: F401
delete_folder, # noqa: F401
delete_inference_endpoint, # noqa: F401
delete_repo, # noqa: F401
delete_space_secret, # noqa: F401
delete_space_storage, # noqa: F401
delete_space_variable, # noqa: F401
delete_tag, # noqa: F401
delete_webhook, # noqa: F401
disable_webhook, # noqa: F401
duplicate_space, # noqa: F401
edit_discussion_comment, # noqa: F401
enable_webhook, # noqa: F401
file_exists, # noqa: F401
get_collection, # noqa: F401
get_dataset_tags, # noqa: F401
get_discussion_details, # noqa: F401
get_full_repo_name, # noqa: F401
get_inference_endpoint, # noqa: F401
get_model_tags, # noqa: F401
get_paths_info, # noqa: F401
get_repo_discussions, # noqa: F401
get_safetensors_metadata, # noqa: F401
get_space_runtime, # noqa: F401
get_space_variables, # noqa: F401
get_token_permission, # noqa: F401
get_user_overview, # noqa: F401
get_webhook, # noqa: F401
grant_access, # noqa: F401
like, # noqa: F401
list_accepted_access_requests, # noqa: F401
list_collections, # noqa: F401
list_datasets, # noqa: F401
list_inference_endpoints, # noqa: F401
list_liked_repos, # noqa: F401
list_models, # noqa: F401
list_organization_members, # noqa: F401
list_papers, # noqa: F401
list_pending_access_requests, # noqa: F401
list_rejected_access_requests, # noqa: F401
list_repo_commits, # noqa: F401
list_repo_files, # noqa: F401
list_repo_likers, # noqa: F401
list_repo_refs, # noqa: F401
list_repo_tree, # noqa: F401
list_spaces, # noqa: F401
list_user_followers, # noqa: F401
list_user_following, # noqa: F401
list_webhooks, # noqa: F401
merge_pull_request, # noqa: F401
model_info, # noqa: F401
move_repo, # noqa: F401
paper_info, # noqa: F401
parse_safetensors_file_metadata, # noqa: F401
pause_inference_endpoint, # noqa: F401
pause_space, # noqa: F401
preupload_lfs_files, # noqa: F401
reject_access_request, # noqa: F401
rename_discussion, # noqa: F401
repo_exists, # noqa: F401
repo_info, # noqa: F401
repo_type_and_id_from_hf_id, # noqa: F401
request_space_hardware, # noqa: F401
request_space_storage, # noqa: F401
restart_space, # noqa: F401
resume_inference_endpoint, # noqa: F401
revision_exists, # noqa: F401
run_as_future, # noqa: F401
scale_to_zero_inference_endpoint, # noqa: F401
set_space_sleep_time, # noqa: F401
space_info, # noqa: F401
super_squash_history, # noqa: F401
unlike, # noqa: F401
update_collection_item, # noqa: F401
update_collection_metadata, # noqa: F401
update_inference_endpoint, # noqa: F401
update_repo_settings, # noqa: F401
update_repo_visibility, # noqa: F401
update_webhook, # noqa: F401
upload_file, # noqa: F401
upload_folder, # noqa: F401
upload_large_folder, # noqa: F401
whoami, # noqa: F401
)
from .hf_file_system import (
HfFileSystem, # noqa: F401
HfFileSystemFile, # noqa: F401
HfFileSystemResolvedPath, # noqa: F401
HfFileSystemStreamFile, # noqa: F401
)
from .hub_mixin import (
ModelHubMixin, # noqa: F401
PyTorchModelHubMixin, # noqa: F401
)
from .inference._client import (
InferenceClient, # noqa: F401
InferenceTimeoutError, # noqa: F401
)
from .inference._generated._async_client import AsyncInferenceClient # noqa: F401
from .inference._generated.types import (
AudioClassificationInput, # noqa: F401
AudioClassificationOutputElement, # noqa: F401
AudioClassificationOutputTransform, # noqa: F401
AudioClassificationParameters, # noqa: F401
AudioToAudioInput, # noqa: F401
AudioToAudioOutputElement, # noqa: F401
AutomaticSpeechRecognitionEarlyStoppingEnum, # noqa: F401
AutomaticSpeechRecognitionGenerationParameters, # noqa: F401
AutomaticSpeechRecognitionInput, # noqa: F401
AutomaticSpeechRecognitionOutput, # noqa: F401
AutomaticSpeechRecognitionOutputChunk, # noqa: F401
AutomaticSpeechRecognitionParameters, # noqa: F401
ChatCompletionInput, # noqa: F401
ChatCompletionInputFunctionDefinition, # noqa: F401
ChatCompletionInputFunctionName, # noqa: F401
ChatCompletionInputGrammarType, # noqa: F401
ChatCompletionInputGrammarTypeType, # noqa: F401
ChatCompletionInputMessage, # noqa: F401
ChatCompletionInputMessageChunk, # noqa: F401
ChatCompletionInputMessageChunkType, # noqa: F401
ChatCompletionInputStreamOptions, # noqa: F401
ChatCompletionInputTool, # noqa: F401
ChatCompletionInputToolChoiceClass, # noqa: F401
ChatCompletionInputToolChoiceEnum, # noqa: F401
ChatCompletionInputURL, # noqa: F401
ChatCompletionOutput, # noqa: F401
ChatCompletionOutputComplete, # noqa: F401
ChatCompletionOutputFunctionDefinition, # noqa: F401
ChatCompletionOutputLogprob, # noqa: F401
ChatCompletionOutputLogprobs, # noqa: F401
ChatCompletionOutputMessage, # noqa: F401
ChatCompletionOutputToolCall, # noqa: F401
ChatCompletionOutputTopLogprob, # noqa: F401
ChatCompletionOutputUsage, # noqa: F401
ChatCompletionStreamOutput, # noqa: F401
ChatCompletionStreamOutputChoice, # noqa: F401
ChatCompletionStreamOutputDelta, # noqa: F401
ChatCompletionStreamOutputDeltaToolCall, # noqa: F401
ChatCompletionStreamOutputFunction, # noqa: F401
ChatCompletionStreamOutputLogprob, # noqa: F401
ChatCompletionStreamOutputLogprobs, # noqa: F401
ChatCompletionStreamOutputTopLogprob, # noqa: F401
ChatCompletionStreamOutputUsage, # noqa: F401
DepthEstimationInput, # noqa: F401
DepthEstimationOutput, # noqa: F401
DocumentQuestionAnsweringInput, # noqa: F401
DocumentQuestionAnsweringInputData, # noqa: F401
DocumentQuestionAnsweringOutputElement, # noqa: F401
DocumentQuestionAnsweringParameters, # noqa: F401
FeatureExtractionInput, # noqa: F401
FeatureExtractionInputTruncationDirection, # noqa: F401
FillMaskInput, # noqa: F401
FillMaskOutputElement, # noqa: F401
FillMaskParameters, # noqa: F401
ImageClassificationInput, # noqa: F401
ImageClassificationOutputElement, # noqa: F401
ImageClassificationOutputTransform, # noqa: F401
ImageClassificationParameters, # noqa: F401
ImageSegmentationInput, # noqa: F401
ImageSegmentationOutputElement, # noqa: F401
ImageSegmentationParameters, # noqa: F401
ImageSegmentationSubtask, # noqa: F401
ImageToImageInput, # noqa: F401
ImageToImageOutput, # noqa: F401
ImageToImageParameters, # noqa: F401
ImageToImageTargetSize, # noqa: F401
ImageToTextEarlyStoppingEnum, # noqa: F401
ImageToTextGenerationParameters, # noqa: F401
ImageToTextInput, # noqa: F401
ImageToTextOutput, # noqa: F401
ImageToTextParameters, # noqa: F401
ObjectDetectionBoundingBox, # noqa: F401
ObjectDetectionInput, # noqa: F401
ObjectDetectionOutputElement, # noqa: F401
ObjectDetectionParameters, # noqa: F401
Padding, # noqa: F401
QuestionAnsweringInput, # noqa: F401
QuestionAnsweringInputData, # noqa: F401
QuestionAnsweringOutputElement, # noqa: F401
QuestionAnsweringParameters, # noqa: F401
SentenceSimilarityInput, # noqa: F401
SentenceSimilarityInputData, # noqa: F401
SummarizationInput, # noqa: F401
SummarizationOutput, # noqa: F401
SummarizationParameters, # noqa: F401
SummarizationTruncationStrategy, # noqa: F401
TableQuestionAnsweringInput, # noqa: F401
TableQuestionAnsweringInputData, # noqa: F401
TableQuestionAnsweringOutputElement, # noqa: F401
TableQuestionAnsweringParameters, # noqa: F401
Text2TextGenerationInput, # noqa: F401
Text2TextGenerationOutput, # noqa: F401
Text2TextGenerationParameters, # noqa: F401
Text2TextGenerationTruncationStrategy, # noqa: F401
TextClassificationInput, # noqa: F401
TextClassificationOutputElement, # noqa: F401
TextClassificationOutputTransform, # noqa: F401
TextClassificationParameters, # noqa: F401
TextGenerationInput, # noqa: F401
TextGenerationInputGenerateParameters, # noqa: F401
TextGenerationInputGrammarType, # noqa: F401
TextGenerationOutput, # noqa: F401
TextGenerationOutputBestOfSequence, # noqa: F401
TextGenerationOutputDetails, # noqa: F401
TextGenerationOutputFinishReason, # noqa: F401
TextGenerationOutputPrefillToken, # noqa: F401
TextGenerationOutputToken, # noqa: F401
TextGenerationStreamOutput, # noqa: F401
TextGenerationStreamOutputStreamDetails, # noqa: F401
TextGenerationStreamOutputToken, # noqa: F401
TextToAudioEarlyStoppingEnum, # noqa: F401
TextToAudioGenerationParameters, # noqa: F401
TextToAudioInput, # noqa: F401
TextToAudioOutput, # noqa: F401
TextToAudioParameters, # noqa: F401
TextToImageInput, # noqa: F401
TextToImageOutput, # noqa: F401
TextToImageParameters, # noqa: F401
TextToImageTargetSize, # noqa: F401
TextToSpeechEarlyStoppingEnum, # noqa: F401
TextToSpeechGenerationParameters, # noqa: F401
TextToSpeechInput, # noqa: F401
TextToSpeechOutput, # noqa: F401
TextToSpeechParameters, # noqa: F401
TokenClassificationAggregationStrategy, # noqa: F401
TokenClassificationInput, # noqa: F401
TokenClassificationOutputElement, # noqa: F401
TokenClassificationParameters, # noqa: F401
TranslationInput, # noqa: F401
TranslationOutput, # noqa: F401
TranslationParameters, # noqa: F401
TranslationTruncationStrategy, # noqa: F401
TypeEnum, # noqa: F401
VideoClassificationInput, # noqa: F401
VideoClassificationOutputElement, # noqa: F401
VideoClassificationOutputTransform, # noqa: F401
VideoClassificationParameters, # noqa: F401
VisualQuestionAnsweringInput, # noqa: F401
VisualQuestionAnsweringInputData, # noqa: F401
VisualQuestionAnsweringOutputElement, # noqa: F401
VisualQuestionAnsweringParameters, # noqa: F401
ZeroShotClassificationInput, # noqa: F401
ZeroShotClassificationOutputElement, # noqa: F401
ZeroShotClassificationParameters, # noqa: F401
ZeroShotImageClassificationInput, # noqa: F401
ZeroShotImageClassificationOutputElement, # noqa: F401
ZeroShotImageClassificationParameters, # noqa: F401
ZeroShotObjectDetectionBoundingBox, # noqa: F401
ZeroShotObjectDetectionInput, # noqa: F401
ZeroShotObjectDetectionOutputElement, # noqa: F401
ZeroShotObjectDetectionParameters, # noqa: F401
)
from .inference_api import InferenceApi # noqa: F401
from .keras_mixin import (
KerasModelHubMixin, # noqa: F401
from_pretrained_keras, # noqa: F401
push_to_hub_keras, # noqa: F401
save_pretrained_keras, # noqa: F401
)
from .repocard import (
DatasetCard, # noqa: F401
ModelCard, # noqa: F401
RepoCard, # noqa: F401
SpaceCard, # noqa: F401
metadata_eval_result, # noqa: F401
metadata_load, # noqa: F401
metadata_save, # noqa: F401
metadata_update, # noqa: F401
)
from .repocard_data import (
CardData, # noqa: F401
DatasetCardData, # noqa: F401
EvalResult, # noqa: F401
ModelCardData, # noqa: F401
SpaceCardData, # noqa: F401
)
from .repository import Repository # noqa: F401
from .serialization import (
StateDictSplit, # noqa: F401
get_tf_storage_size, # noqa: F401
get_torch_storage_id, # noqa: F401
get_torch_storage_size, # noqa: F401
load_state_dict_from_file, # noqa: F401
load_torch_model, # noqa: F401
save_torch_model, # noqa: F401
save_torch_state_dict, # noqa: F401
split_state_dict_into_shards_factory, # noqa: F401
split_tf_state_dict_into_shards, # noqa: F401
split_torch_state_dict_into_shards, # noqa: F401
)
from .serialization._dduf import (
DDUFEntry, # noqa: F401
export_entries_as_dduf, # noqa: F401
export_folder_as_dduf, # noqa: F401
read_dduf_file, # noqa: F401
)
from .utils import (
CachedFileInfo, # noqa: F401
CachedRepoInfo, # noqa: F401
CachedRevisionInfo, # noqa: F401
CacheNotFound, # noqa: F401
CorruptedCacheException, # noqa: F401
DeleteCacheStrategy, # noqa: F401
HFCacheInfo, # noqa: F401
HfFolder, # noqa: F401
cached_assets_path, # noqa: F401
configure_http_backend, # noqa: F401
dump_environment_info, # noqa: F401
get_session, # noqa: F401
get_token, # noqa: F401
logging, # noqa: F401
scan_cache_dir, # noqa: F401
) | if_statement | 18,838 | 37,217 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/__init__.py | null |
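A short usage sketch of how those names behave at runtime; the `TYPE_CHECKING` block above only serves static type checkers, while actual attribute access goes through the lazy `__getattr__`. Assumes `huggingface_hub` is installed.
import huggingface_hub

# First attribute access triggers the lazy import of the defining submodule.
print(huggingface_hub.hf_hub_download)            # resolved lazily from .file_download
print("hf_hub_download" in dir(huggingface_hub))  # True, via the module-level __dir__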
class LocalDownloadFilePaths:
"""
Paths to the files related to a download process in a local dir.
Returned by [`get_local_download_paths`].
Attributes:
file_path (`Path`):
Path where the file will be saved.
lock_path (`Path`):
Path to the lock file used to ensure atomicity when reading/writing metadata.
metadata_path (`Path`):
Path to the metadata file.
"""
file_path: Path
lock_path: Path
metadata_path: Path
def incomplete_path(self, etag: str) -> Path:
"""Return the path where a file will be temporarily downloaded before being moved to `file_path`."""
return self.metadata_path.with_suffix(f".{etag}.incomplete") | class_definition | 1,891 | 2,627 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
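A hedged sketch of how `incomplete_path` derives the temporary download target. The paths are constructed by hand here (in the library they are produced by `get_local_download_paths`); it assumes the class is a dataclass, since decorators fall outside the extracted span, and that the private module path matches the filepath column.
from pathlib import Path

from huggingface_hub._local_folder import LocalDownloadFilePaths  # assumed private module path

paths = LocalDownloadFilePaths(
    file_path=Path("my-model/config.json"),
    lock_path=Path("my-model/.meta/config.json.lock"),
    metadata_path=Path("my-model/.meta/config.json.metadata"),
)
# The final suffix of the metadata path is swapped for ".<etag>.incomplete".
print(paths.incomplete_path("abc123"))  # my-model/.meta/config.json.abc123.incomplete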
def incomplete_path(self, etag: str) -> Path:
"""Return the path where a file will be temporarily downloaded before being moved to `file_path`."""
return self.metadata_path.with_suffix(f".{etag}.incomplete") | function_definition | 2,404 | 2,627 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | LocalDownloadFilePaths |
class LocalUploadFilePaths:
"""
Paths to the files related to an upload process in a local dir.
Returned by [`get_local_upload_paths`].
Attributes:
path_in_repo (`str`):
Path of the file in the repo.
file_path (`Path`):
Path where the file will be saved.
lock_path (`Path`):
Path to the lock file used to ensure atomicity when reading/writing metadata.
metadata_path (`Path`):
Path to the metadata file.
"""
path_in_repo: str
file_path: Path
lock_path: Path
metadata_path: Path | class_definition | 2,654 | 3,250 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
class LocalDownloadFileMetadata:
"""
Metadata about a file in the local directory related to a download process.
Attributes:
filename (`str`):
Path of the file in the repo.
commit_hash (`str`):
Commit hash of the file in the repo.
etag (`str`):
ETag of the file in the repo. Used to check if the file has changed.
For LFS files, this is the sha256 of the file. For regular files, it corresponds to the git hash.
timestamp (`int`):
Unix timestamp of when the metadata was saved i.e. when the metadata was accurate.
"""
filename: str
commit_hash: str
etag: str
timestamp: float | class_definition | 3,264 | 3,965 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
class LocalUploadFileMetadata:
"""
Metadata about a file in the local directory related to an upload process.
"""
size: int
# Default values correspond to "we don't know yet"
timestamp: Optional[float] = None
should_ignore: Optional[bool] = None
sha256: Optional[str] = None
upload_mode: Optional[str] = None
is_uploaded: bool = False
is_committed: bool = False
def save(self, paths: LocalUploadFilePaths) -> None:
"""Save the metadata to disk."""
with WeakFileLock(paths.lock_path):
with paths.metadata_path.open("w") as f:
new_timestamp = time.time()
f.write(str(new_timestamp) + "\n")
f.write(str(self.size)) # never None
f.write("\n")
if self.should_ignore is not None:
f.write(str(int(self.should_ignore)))
f.write("\n")
if self.sha256 is not None:
f.write(self.sha256)
f.write("\n")
if self.upload_mode is not None:
f.write(self.upload_mode)
f.write("\n")
f.write(str(int(self.is_uploaded)) + "\n")
f.write(str(int(self.is_committed)) + "\n")
self.timestamp = new_timestamp | class_definition | 3,979 | 5,308 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
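A hedged sketch of the on-disk layout written by `save()` above: one field per line (timestamp, size, should_ignore, sha256, upload_mode, is_uploaded, is_committed), with an empty line standing in for a still-unknown value. It assumes both classes are dataclasses (decorators are outside the extracted spans) and importable from the private module shown in the file path.
import tempfile
from pathlib import Path

from huggingface_hub._local_folder import (  # assumed private module path
    LocalUploadFileMetadata,
    LocalUploadFilePaths,
)

with tempfile.TemporaryDirectory() as tmp:
    meta_dir = Path(tmp) / "meta"
    meta_dir.mkdir()
    paths = LocalUploadFilePaths(
        path_in_repo="data/train.csv",
        file_path=Path(tmp) / "data" / "train.csv",
        lock_path=meta_dir / "train.csv.lock",
        metadata_path=meta_dir / "train.csv.metadata",
    )
    meta = LocalUploadFileMetadata(size=1024, sha256="0" * 64, upload_mode="lfs")
    meta.save(paths)
    print(paths.metadata_path.read_text())
    # Expected layout, one field per line:
    #   <timestamp>
    #   1024
    #   (empty: should_ignore still unknown)
    #   000...0   (sha256)
    #   lfs       (upload_mode)
    #   0         (is_uploaded)
    #   0         (is_committed)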
def save(self, paths: LocalUploadFilePaths) -> None:
"""Save the metadata to disk."""
with WeakFileLock(paths.lock_path):
with paths.metadata_path.open("w") as f:
new_timestamp = time.time()
f.write(str(new_timestamp) + "\n")
f.write(str(self.size)) # never None
f.write("\n")
if self.should_ignore is not None:
f.write(str(int(self.should_ignore)))
f.write("\n")
if self.sha256 is not None:
f.write(self.sha256)
f.write("\n")
if self.upload_mode is not None:
f.write(self.upload_mode)
f.write("\n")
f.write(str(int(self.is_uploaded)) + "\n")
f.write(str(int(self.is_committed)) + "\n")
self.timestamp = new_timestamp | function_definition | 4,392 | 5,308 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | LocalUploadFileMetadata |
if self.should_ignore is not None:
f.write(str(int(self.should_ignore))) | if_statement | 4,780 | 4,872 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | LocalUploadFileMetadata |
if self.sha256 is not None:
f.write(self.sha256) | if_statement | 4,920 | 4,988 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | LocalUploadFileMetadata |
if self.upload_mode is not None:
f.write(self.upload_mode) | if_statement | 5,036 | 5,114 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | LocalUploadFileMetadata |
def get_local_download_paths(local_dir: Path, filename: str) -> LocalDownloadFilePaths:
"""Compute paths to the files related to a download process.
Folders containing the paths are all guaranteed to exist.
Args:
local_dir (`Path`):
Path to the local directory in which files are downloaded.
filename (`str`):
Path of the file in the repo.
Return:
[`LocalDownloadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path, incomplete_path).
"""
# filename is the path in the Hub repository (separated by '/')
    # make sure it is converted to a cross-platform file path
sanitized_filename = os.path.join(*filename.split("/"))
if os.name == "nt":
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
)
file_path = local_dir / sanitized_filename
metadata_path = _huggingface_dir(local_dir) / "download" / f"{sanitized_filename}.metadata"
lock_path = metadata_path.with_suffix(".lock")
# Some Windows versions do not allow for paths longer than 255 characters.
# In this case, we must specify it as an extended path by using the "\\?\" prefix
if os.name == "nt":
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
file_path.parent.mkdir(parents=True, exist_ok=True)
metadata_path.parent.mkdir(parents=True, exist_ok=True)
return LocalDownloadFilePaths(file_path=file_path, lock_path=lock_path, metadata_path=metadata_path) | function_definition | 5,311 | 7,258 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
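A hedged usage sketch of the helper above; the exact location of the hidden metadata folder is decided by `_huggingface_dir`, which is not shown in this extract, and the private import path is assumed from the filepath column.
import tempfile
from pathlib import Path

from huggingface_hub._local_folder import get_local_download_paths  # assumed private module path

with tempfile.TemporaryDirectory() as tmp:
    paths = get_local_download_paths(Path(tmp), "subdir/model.safetensors")
    print(paths.file_path)      # <tmp>/subdir/model.safetensors
    print(paths.metadata_path)  # inside a hidden metadata folder under <tmp>, ending in ".metadata"
    print(paths.lock_path)      # metadata_path with its final suffix replaced by ".lock"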
if os.name == "nt":
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
) | if_statement | 6,028 | 6,342 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
) | if_statement | 6,056 | 6,342 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if os.name == "nt":
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path)) | if_statement | 6,707 | 7,036 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path)) | if_statement | 6,735 | 7,036 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
def get_local_upload_paths(local_dir: Path, filename: str) -> LocalUploadFilePaths:
"""Compute paths to the files related to an upload process.
Folders containing the paths are all guaranteed to exist.
Args:
local_dir (`Path`):
Path to the local directory that is uploaded.
filename (`str`):
Path of the file in the repo.
Return:
[`LocalUploadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path).
"""
# filename is the path in the Hub repository (separated by '/')
    # make sure it is converted to a cross-platform file path
sanitized_filename = os.path.join(*filename.split("/"))
if os.name == "nt":
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
)
file_path = local_dir / sanitized_filename
metadata_path = _huggingface_dir(local_dir) / "upload" / f"{sanitized_filename}.metadata"
lock_path = metadata_path.with_suffix(".lock")
# Some Windows versions do not allow for paths longer than 255 characters.
# In this case, we must specify it as an extended path by using the "\\?\" prefix
if os.name == "nt":
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
file_path.parent.mkdir(parents=True, exist_ok=True)
metadata_path.parent.mkdir(parents=True, exist_ok=True)
return LocalUploadFilePaths(
path_in_repo=filename, file_path=file_path, lock_path=lock_path, metadata_path=metadata_path
) | function_definition | 7,261 | 9,204 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if os.name == "nt":
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
) | if_statement | 7,941 | 8,255 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
raise ValueError(
f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
" owner to rename this file."
) | if_statement | 7,969 | 8,255 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if os.name == "nt":
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path)) | if_statement | 8,618 | 8,947 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
file_path = Path("\\\\?\\" + os.path.abspath(file_path))
lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path)) | if_statement | 8,646 | 8,947 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
def read_download_metadata(local_dir: Path, filename: str) -> Optional[LocalDownloadFileMetadata]:
"""Read metadata about a file in the local directory related to a download process.
Args:
local_dir (`Path`):
Path to the local directory in which files are downloaded.
filename (`str`):
Path of the file in the repo.
Return:
`[LocalDownloadFileMetadata]` or `None`: the metadata if it exists, `None` otherwise.
"""
paths = get_local_download_paths(local_dir, filename)
with WeakFileLock(paths.lock_path):
if paths.metadata_path.exists():
try:
with paths.metadata_path.open() as f:
commit_hash = f.readline().strip()
etag = f.readline().strip()
timestamp = float(f.readline().strip())
metadata = LocalDownloadFileMetadata(
filename=filename,
commit_hash=commit_hash,
etag=etag,
timestamp=timestamp,
)
except Exception as e:
# remove the metadata file if it is corrupted / not the right format
logger.warning(
f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continue."
)
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
try:
# check if the file exists and hasn't been modified since the metadata was saved
stat = paths.file_path.stat()
if (
stat.st_mtime - 1 <= metadata.timestamp
): # allow 1s difference as stat.st_mtime might not be precise
return metadata
logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
except FileNotFoundError:
# file does not exist => metadata is outdated
return None
return None | function_definition | 9,207 | 11,408 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
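A hedged round-trip sketch of the reader above: the metadata file is written by hand in the three-line format the function parses (commit_hash, etag, timestamp), with the downloaded file created first so its mtime does not exceed the recorded timestamp. Import paths are assumed from the filepath column.
import tempfile
import time
from pathlib import Path

from huggingface_hub._local_folder import (  # assumed private module path
    get_local_download_paths,
    read_download_metadata,
)

with tempfile.TemporaryDirectory() as tmp:
    local_dir = Path(tmp)
    paths = get_local_download_paths(local_dir, "config.json")
    paths.file_path.write_text("{}")  # the "downloaded" file itself
    paths.metadata_path.write_text(f"some-commit-hash\nsome-etag\n{time.time()}\n")
    meta = read_download_metadata(local_dir, "config.json")
    print(meta.etag if meta is not None else "metadata missing or outdated")  # some-etag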
if paths.metadata_path.exists():
try:
with paths.metadata_path.open() as f:
commit_hash = f.readline().strip()
etag = f.readline().strip()
timestamp = float(f.readline().strip())
metadata = LocalDownloadFileMetadata(
filename=filename,
commit_hash=commit_hash,
etag=etag,
timestamp=timestamp,
)
except Exception as e:
# remove the metadata file if it is corrupted / not the right format
logger.warning(
f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continue."
)
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
try:
# check if the file exists and hasn't been modified since the metadata was saved
stat = paths.file_path.stat()
if (
stat.st_mtime - 1 <= metadata.timestamp
): # allow 1s difference as stat.st_mtime might not be precise
return metadata
logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
except FileNotFoundError:
# file does not exist => metadata is outdated
return None | if_statement | 9,793 | 11,392 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
try:
with paths.metadata_path.open() as f:
commit_hash = f.readline().strip()
etag = f.readline().strip()
timestamp = float(f.readline().strip())
metadata = LocalDownloadFileMetadata(
filename=filename,
commit_hash=commit_hash,
etag=etag,
timestamp=timestamp,
)
except Exception as e:
# remove the metadata file if it is corrupted / not the right format
logger.warning(
f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continue."
)
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}") | try_statement | 9,838 | 10,806 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}") | try_statement | 10,607 | 10,806 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
try:
# check if the file exists and hasn't been modified since the metadata was saved
stat = paths.file_path.stat()
if (
stat.st_mtime - 1 <= metadata.timestamp
): # allow 1s difference as stat.st_mtime might not be precise
return metadata
logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
except FileNotFoundError:
# file does not exist => metadata is outdated
return None | try_statement | 10,820 | 11,392 | 2 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
if (
stat.st_mtime - 1 <= metadata.timestamp
): # allow 1s difference as stat.st_mtime might not be precise
return metadata | if_statement | 10,984 | 11,164 | 3 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
def read_upload_metadata(local_dir: Path, filename: str) -> LocalUploadFileMetadata:
"""Read metadata about a file in the local directory related to an upload process.
TODO: factorize logic with `read_download_metadata`.
Args:
local_dir (`Path`):
            Path to the local directory that is uploaded.
filename (`str`):
Path of the file in the repo.
Return:
        `[LocalUploadFileMetadata]`: the metadata if it exists and is still valid, otherwise a fresh entry that only records the file size.
"""
paths = get_local_upload_paths(local_dir, filename)
with WeakFileLock(paths.lock_path):
if paths.metadata_path.exists():
try:
with paths.metadata_path.open() as f:
timestamp = float(f.readline().strip())
size = int(f.readline().strip()) # never None
_should_ignore = f.readline().strip()
should_ignore = None if _should_ignore == "" else bool(int(_should_ignore))
_sha256 = f.readline().strip()
sha256 = None if _sha256 == "" else _sha256
_upload_mode = f.readline().strip()
upload_mode = None if _upload_mode == "" else _upload_mode
if upload_mode not in (None, "regular", "lfs"):
raise ValueError(f"Invalid upload mode in metadata {paths.path_in_repo}: {upload_mode}")
is_uploaded = bool(int(f.readline().strip()))
is_committed = bool(int(f.readline().strip()))
metadata = LocalUploadFileMetadata(
timestamp=timestamp,
size=size,
should_ignore=should_ignore,
sha256=sha256,
upload_mode=upload_mode,
is_uploaded=is_uploaded,
is_committed=is_committed,
)
except Exception as e:
# remove the metadata file if it is corrupted / not the right format
logger.warning(
f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continue."
)
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
# TODO: can we do better?
if (
metadata.timestamp is not None
and metadata.is_uploaded # file was uploaded
and not metadata.is_committed # but not committed
and time.time() - metadata.timestamp > 20 * 3600 # and it's been more than 20 hours
): # => we consider it as garbage-collected by S3
metadata.is_uploaded = False
# check if the file exists and hasn't been modified since the metadata was saved
try:
if metadata.timestamp is not None and paths.file_path.stat().st_mtime <= metadata.timestamp:
return metadata
logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
except FileNotFoundError:
# file does not exist => metadata is outdated
pass
    # empty metadata => we don't know anything except its size
return LocalUploadFileMetadata(size=paths.file_path.stat().st_size) | function_definition | 11,411 | 14,938 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |
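A hedged sketch of the fallback at the end of `read_upload_metadata`: with no metadata file on disk, the function returns a fresh record that only knows the file's size. The private import path is assumed from the filepath column.
import tempfile
from pathlib import Path

from huggingface_hub._local_folder import read_upload_metadata  # assumed private module path

with tempfile.TemporaryDirectory() as tmp:
    local_dir = Path(tmp)
    (local_dir / "notes.txt").write_text("hello")
    meta = read_upload_metadata(local_dir, "notes.txt")
    print(meta.size)         # 5
    print(meta.sha256)       # None (not hashed yet)
    print(meta.is_uploaded)  # False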
if paths.metadata_path.exists():
try:
with paths.metadata_path.open() as f:
timestamp = float(f.readline().strip())
size = int(f.readline().strip()) # never None
_should_ignore = f.readline().strip()
should_ignore = None if _should_ignore == "" else bool(int(_should_ignore))
_sha256 = f.readline().strip()
sha256 = None if _sha256 == "" else _sha256
_upload_mode = f.readline().strip()
upload_mode = None if _upload_mode == "" else _upload_mode
if upload_mode not in (None, "regular", "lfs"):
raise ValueError(f"Invalid upload mode in metadata {paths.path_in_repo}: {upload_mode}")
is_uploaded = bool(int(f.readline().strip()))
is_committed = bool(int(f.readline().strip()))
metadata = LocalUploadFileMetadata(
timestamp=timestamp,
size=size,
should_ignore=should_ignore,
sha256=sha256,
upload_mode=upload_mode,
is_uploaded=is_uploaded,
is_committed=is_committed,
)
except Exception as e:
# remove the metadata file if it is corrupted / not the right format
logger.warning(
f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continue."
)
try:
paths.metadata_path.unlink()
except Exception as e:
logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
# TODO: can we do better?
if (
metadata.timestamp is not None
and metadata.is_uploaded # file was uploaded
and not metadata.is_committed # but not committed
and time.time() - metadata.timestamp > 20 * 3600 # and it's been more than 20 hours
): # => we consider it as garbage-collected by S3
metadata.is_uploaded = False
# check if the file exists and hasn't been modified since the metadata was saved
try:
if metadata.timestamp is not None and paths.file_path.stat().st_mtime <= metadata.timestamp:
return metadata
logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
except FileNotFoundError:
# file does not exist => metadata is outdated
pass | if_statement | 12,036 | 14,802 | 1 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_local_folder.py | null |