text (string, 38-361k chars) | type (string, 1 class) | start (int64, 156-155k) | end (int64, 451-418k) | depth (int64, 0-0) | filepath (string, 87-141 chars) | parent_class (null) | class_index (int64, 0-305) |
---|---|---|---|---|---|---|---|
class DDUFError(Exception):
"""Base exception for errors related to the DDUF format.""" | class_definition | 9,305 | 9,396 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/errors.py | null | 100 |
class DDUFCorruptedFileError(DDUFError):
"""Exception thrown when the DDUF file is corrupted.""" | class_definition | 9,399 | 9,499 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/errors.py | null | 101 |
class DDUFExportError(DDUFError):
"""Base exception for errors during DDUF export.""" | class_definition | 9,502 | 9,591 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/errors.py | null | 102 |
class DDUFInvalidEntryNameError(DDUFExportError):
"""Exception thrown when the entry name is invalid.""" | class_definition | 9,594 | 9,702 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/errors.py | null | 103 |
class WorkerJob(enum.Enum):
SHA256 = enum.auto()
GET_UPLOAD_MODE = enum.auto()
PREUPLOAD_LFS = enum.auto()
COMMIT = enum.auto()
WAIT = enum.auto() # if no tasks are available but we don't want to exit | class_definition | 5,090 | 5,311 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_upload_large_folder.py | null | 104 |
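A sketch of dispatching on the `WorkerJob` members above; the `describe` function is purely illustrative, while the library's actual worker loop in `_upload_large_folder.py` instead pulls items from the matching queues of `LargeUploadStatus` (defined in the next row):

```python
# Assumes the WorkerJob enum defined above is in scope.
def describe(job: WorkerJob) -> str:
    # Illustrative mapping from job type to the action a worker would perform.
    if job is WorkerJob.SHA256:
        return "hash the file content"
    if job is WorkerJob.GET_UPLOAD_MODE:
        return "ask the Hub whether the file is a regular or an LFS upload"
    if job is WorkerJob.PREUPLOAD_LFS:
        return "upload the LFS file content"
    if job is WorkerJob.COMMIT:
        return "include the file in the next commit"
    return "no task available: sleep briefly and poll again"

print(describe(WorkerJob.WAIT))
```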
class LargeUploadStatus:
"""Contains information, queues and tasks for a large upload process."""
def __init__(self, items: List[JOB_ITEM_T]):
self.items = items
self.queue_sha256: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
self.queue_get_upload_mode: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
self.queue_preupload_lfs: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
self.queue_commit: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
self.lock = Lock()
self.nb_workers_sha256: int = 0
self.nb_workers_get_upload_mode: int = 0
self.nb_workers_preupload_lfs: int = 0
self.nb_workers_commit: int = 0
self.nb_workers_waiting: int = 0
self.last_commit_attempt: Optional[float] = None
self._started_at = datetime.now()
# Setup queues
for item in self.items:
paths, metadata = item
if metadata.sha256 is None:
self.queue_sha256.put(item)
elif metadata.upload_mode is None:
self.queue_get_upload_mode.put(item)
elif metadata.upload_mode == "lfs" and not metadata.is_uploaded:
self.queue_preupload_lfs.put(item)
elif not metadata.is_committed:
self.queue_commit.put(item)
else:
logger.debug(f"Skipping file {paths.path_in_repo} (already uploaded and committed)")
def current_report(self) -> str:
"""Generate a report of the current status of the large upload."""
nb_hashed = 0
size_hashed = 0
nb_preuploaded = 0
nb_lfs = 0
nb_lfs_unsure = 0
size_preuploaded = 0
nb_committed = 0
size_committed = 0
total_size = 0
ignored_files = 0
total_files = 0
with self.lock:
for _, metadata in self.items:
if metadata.should_ignore:
ignored_files += 1
continue
total_size += metadata.size
total_files += 1
if metadata.sha256 is not None:
nb_hashed += 1
size_hashed += metadata.size
if metadata.upload_mode == "lfs":
nb_lfs += 1
if metadata.upload_mode is None:
nb_lfs_unsure += 1
if metadata.is_uploaded:
nb_preuploaded += 1
size_preuploaded += metadata.size
if metadata.is_committed:
nb_committed += 1
size_committed += metadata.size
total_size_str = _format_size(total_size)
now = datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M:%S")
elapsed = now - self._started_at
elapsed_str = str(elapsed).split(".")[0] # remove milliseconds
message = "\n" + "-" * 10
message += f" {now_str} ({elapsed_str}) "
message += "-" * 10 + "\n"
message += "Files: "
message += f"hashed {nb_hashed}/{total_files} ({_format_size(size_hashed)}/{total_size_str}) | "
message += f"pre-uploaded: {nb_preuploaded}/{nb_lfs} ({_format_size(size_preuploaded)}/{total_size_str})"
if nb_lfs_unsure > 0:
message += f" (+{nb_lfs_unsure} unsure)"
message += f" | committed: {nb_committed}/{total_files} ({_format_size(size_committed)}/{total_size_str})"
message += f" | ignored: {ignored_files}\n"
message += "Workers: "
message += f"hashing: {self.nb_workers_sha256} | "
message += f"get upload mode: {self.nb_workers_get_upload_mode} | "
message += f"pre-uploading: {self.nb_workers_preupload_lfs} | "
message += f"committing: {self.nb_workers_commit} | "
message += f"waiting: {self.nb_workers_waiting}\n"
message += "-" * 51
return message
def is_done(self) -> bool:
with self.lock:
return all(metadata.is_committed or metadata.should_ignore for _, metadata in self.items) | class_definition | 5,382 | 9,545 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_upload_large_folder.py | null | 105 |
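A hedged sketch of driving `LargeUploadStatus`: the constructor sorts each `(paths, metadata)` item into the queue matching its progress, `current_report()` returns a human-readable snapshot, and `is_done()` tells the caller when every file is committed or ignored. The `items` list is assumed to have been built by the caller as a `List[JOB_ITEM_T]`:

```python
# `items` is assumed to be prepared elsewhere as a List[JOB_ITEM_T]
# (each element a (paths, metadata) pair, as expected by the constructor above).
status = LargeUploadStatus(items)

print(status.current_report())        # progress snapshot, computed under the lock

# A worker would typically pull from the queue that matches its assigned job:
if not status.queue_sha256.empty():
    item = status.queue_sha256.get()  # next file that still needs hashing

if status.is_done():
    print("Every file is either committed or ignored.")
```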
class HackyCommitOperationAdd(CommitOperationAdd):
def __post_init__(self) -> None:
if isinstance(self.path_or_fileobj, Path):
self.path_or_fileobj = str(self.path_or_fileobj) | class_definition | 20,659 | 20,858 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_upload_large_folder.py | null | 106 |
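The override above simply coerces a `pathlib.Path` into a plain `str`, and because it does not call `super().__post_init__()` the parent's usual checks are skipped. A small illustration; both paths are hypothetical:

```python
from pathlib import Path

# Assumes HackyCommitOperationAdd above is in scope.
op = HackyCommitOperationAdd(
    path_in_repo="weights/model.safetensors",                  # hypothetical target path
    path_or_fileobj=Path("local/weights/model.safetensors"),   # hypothetical local file
)
assert isinstance(op.path_or_fileobj, str)  # Path was converted to str
```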
class RepoCard:
card_data_class = CardData
default_template_path = TEMPLATE_MODELCARD_PATH
repo_type = "model"
def __init__(self, content: str, ignore_metadata_errors: bool = False):
"""Initialize a RepoCard from string content. The content should be a
Markdown file with a YAML block at the beginning and a Markdown body.
Args:
content (`str`): The content of the Markdown file.
Example:
```python
>>> from huggingface_hub.repocard import RepoCard
>>> text = '''
... ---
... language: en
... license: mit
... ---
...
... # My repo
... '''
>>> card = RepoCard(text)
>>> card.data.to_dict()
{'language': 'en', 'license': 'mit'}
>>> card.text
'\\n# My repo\\n'
```
<Tip>
Raises the following error:
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
when the content of the repo card metadata is not a dictionary.
</Tip>
"""
# Set the content of the RepoCard, as well as underlying .data and .text attributes.
# See the `content` property setter for more details.
self.ignore_metadata_errors = ignore_metadata_errors
self.content = content
@property
def content(self):
"""The content of the RepoCard, including the YAML block and the Markdown body."""
line_break = _detect_line_ending(self._content) or "\n"
return f"---{line_break}{self.data.to_yaml(line_break=line_break, original_order=self._original_order)}{line_break}---{line_break}{self.text}"
@content.setter
def content(self, content: str):
"""Set the content of the RepoCard."""
self._content = content
match = REGEX_YAML_BLOCK.search(content)
if match:
# Metadata found in the YAML block
yaml_block = match.group(2)
self.text = content[match.end() :]
data_dict = yaml.safe_load(yaml_block)
if data_dict is None:
data_dict = {}
# The YAML block's data should be a dictionary
if not isinstance(data_dict, dict):
raise ValueError("repo card metadata block should be a dict")
else:
# Model card without metadata... create empty metadata
logger.warning("Repo card metadata block was not found. Setting CardData to empty.")
data_dict = {}
self.text = content
self.data = self.card_data_class(**data_dict, ignore_metadata_errors=self.ignore_metadata_errors)
self._original_order = list(data_dict.keys())
def __str__(self):
return self.content
def save(self, filepath: Union[Path, str]):
r"""Save a RepoCard to a file.
Args:
filepath (`Union[Path, str]`): Filepath to the markdown file to save.
Example:
```python
>>> from huggingface_hub.repocard import RepoCard
>>> card = RepoCard("---\nlanguage: en\n---\n# This is a test repo card")
>>> card.save("/tmp/test.md")
```
"""
filepath = Path(filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
# Preserve newlines as in the existing file.
with open(filepath, mode="w", newline="", encoding="utf-8") as f:
f.write(str(self))
@classmethod
def load(
cls,
repo_id_or_path: Union[str, Path],
repo_type: Optional[str] = None,
token: Optional[str] = None,
ignore_metadata_errors: bool = False,
):
"""Initialize a RepoCard from a Hugging Face Hub repo's README.md or a local filepath.
Args:
repo_id_or_path (`Union[str, Path]`):
The repo ID associated with a Hugging Face Hub repo or a local filepath.
repo_type (`str`, *optional*):
The type of Hugging Face repo to push to. Defaults to None, which will use "model". Other options
are "dataset" and "space". Not used when loading from a local filepath. If this is called from a child
class, the default value will be the child class's `repo_type`.
token (`str`, *optional*):
Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token.
ignore_metadata_errors (`str`):
If True, errors while parsing the metadata section will be ignored. Some information might be lost during
the process. Use it at your own risk.
Returns:
[`huggingface_hub.repocard.RepoCard`]: The RepoCard (or subclass) initialized from the repo's
README.md file or filepath.
Example:
```python
>>> from huggingface_hub.repocard import RepoCard
>>> card = RepoCard.load("nateraw/food")
>>> assert card.data.tags == ["generated_from_trainer", "image-classification", "pytorch"]
```
"""
if Path(repo_id_or_path).exists():
card_path = Path(repo_id_or_path)
elif isinstance(repo_id_or_path, str):
card_path = Path(
hf_hub_download(
repo_id_or_path,
constants.REPOCARD_NAME,
repo_type=repo_type or cls.repo_type,
token=token,
)
)
else:
raise ValueError(f"Cannot load RepoCard: path not found on disk ({repo_id_or_path}).")
# Preserve newlines in the existing file.
with card_path.open(mode="r", newline="", encoding="utf-8") as f:
return cls(f.read(), ignore_metadata_errors=ignore_metadata_errors)
def validate(self, repo_type: Optional[str] = None):
"""Validates card against Hugging Face Hub's card validation logic.
Using this function requires access to the internet, so it is only called
internally by [`huggingface_hub.repocard.RepoCard.push_to_hub`].
Args:
repo_type (`str`, *optional*, defaults to "model"):
The type of Hugging Face repo to push to. Options are "model", "dataset", and "space".
If this function is called from a child class, the default will be the child class's `repo_type`.
<Tip>
Raises the following errors:
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
if the card fails validation checks.
- [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
if the request to the Hub API fails for any other reason.
</Tip>
"""
# Use the provided repo_type if any, otherwise fall back to the card's repo_type.
repo_type = repo_type or self.repo_type
body = {
"repoType": repo_type,
"content": str(self),
}
headers = {"Accept": "text/plain"}
try:
r = get_session().post("https://huggingface.co/api/validate-yaml", body, headers=headers)
r.raise_for_status()
except requests.exceptions.HTTPError as exc:
if r.status_code == 400:
raise ValueError(r.text)
else:
raise exc
def push_to_hub(
self,
repo_id: str,
token: Optional[str] = None,
repo_type: Optional[str] = None,
commit_message: Optional[str] = None,
commit_description: Optional[str] = None,
revision: Optional[str] = None,
create_pr: Optional[bool] = None,
parent_commit: Optional[str] = None,
):
"""Push a RepoCard to a Hugging Face Hub repo.
Args:
repo_id (`str`):
The repo ID of the Hugging Face Hub repo to push to. Example: "nateraw/food".
token (`str`, *optional*):
Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to
the stored token.
repo_type (`str`, *optional*, defaults to "model"):
The type of Hugging Face repo to push to. Options are "model", "dataset", and "space". If this
function is called by a child class, it will default to the child class's `repo_type`.
commit_message (`str`, *optional*):
The summary / title / first line of the generated commit.
commit_description (`str`, *optional*):
The description of the generated commit.
revision (`str`, *optional*):
The git revision to commit from. Defaults to the head of the `"main"` branch.
create_pr (`bool`, *optional*):
Whether or not to create a Pull Request with this commit. Defaults to `False`.
parent_commit (`str`, *optional*):
The OID / SHA of the parent commit, as a hexadecimal string. Shorthands (7 first characters) are also supported.
If specified and `create_pr` is `False`, the commit will fail if `revision` does not point to `parent_commit`.
If specified and `create_pr` is `True`, the pull request will be created from `parent_commit`.
Specifying `parent_commit` ensures the repo has not changed before committing the changes, and can be
especially useful if the repo is updated / committed to concurrently.
Returns:
`str`: URL of the commit which updated the card metadata.
"""
# Use the provided repo_type if any, otherwise fall back to the card's repo_type.
repo_type = repo_type or self.repo_type
# Validate card before pushing to hub
self.validate(repo_type=repo_type)
with SoftTemporaryDirectory() as tmpdir:
tmp_path = Path(tmpdir) / constants.REPOCARD_NAME
tmp_path.write_text(str(self))
url = upload_file(
path_or_fileobj=str(tmp_path),
path_in_repo=constants.REPOCARD_NAME,
repo_id=repo_id,
token=token,
repo_type=repo_type,
commit_message=commit_message,
commit_description=commit_description,
create_pr=create_pr,
revision=revision,
parent_commit=parent_commit,
)
return url
@classmethod
def from_template(
cls,
card_data: CardData,
template_path: Optional[str] = None,
template_str: Optional[str] = None,
**template_kwargs,
):
"""Initialize a RepoCard from a template. By default, it uses the default template.
Templates are Jinja2 templates that can be customized by passing keyword arguments.
Args:
card_data (`huggingface_hub.CardData`):
A huggingface_hub.CardData instance containing the metadata you want to include in the YAML
header of the repo card on the Hugging Face Hub.
template_path (`str`, *optional*):
A path to a markdown file with optional Jinja template variables that can be filled
in with `template_kwargs`. Defaults to the default template.
Returns:
[`huggingface_hub.repocard.RepoCard`]: A RepoCard instance with the specified card data and content from the
template.
"""
if is_jinja_available():
import jinja2
else:
raise ImportError(
"Using RepoCard.from_template requires Jinja2 to be installed. Please"
" install it with `pip install Jinja2`."
)
kwargs = card_data.to_dict().copy()
kwargs.update(template_kwargs) # Template_kwargs have priority
if template_path is not None:
template_str = Path(template_path).read_text()
if template_str is None:
template_str = Path(cls.default_template_path).read_text()
template = jinja2.Template(template_str)
content = template.render(card_data=card_data.to_yaml(), **kwargs)
return cls(content) | class_definition | 1,118 | 13,472 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repocard.py | null | 107 |
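Putting the `RepoCard` pieces documented above together, a hedged end-to-end sketch; the repo ids are illustrative and `push_to_hub` requires a valid token:

```python
from huggingface_hub import RepoCard

# Load the README.md of an existing Hub repo (fetched via hf_hub_download).
card = RepoCard.load("nateraw/food")

# The YAML block is exposed as `card.data`, the Markdown body as `card.text`.
card.data.license = "mit"
card.text += "\n\n## Notes added locally\n"

# Persist locally, or push back to the Hub (validate() runs before the upload).
card.save("/tmp/README.md")
# card.push_to_hub("your-username/your-repo")  # illustrative repo id, needs auth
```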
class ModelCard(RepoCard):
card_data_class = ModelCardData
default_template_path = TEMPLATE_MODELCARD_PATH
repo_type = "model"
@classmethod
def from_template( # type: ignore # violates Liskov property but easier to use
cls,
card_data: ModelCardData,
template_path: Optional[str] = None,
template_str: Optional[str] = None,
**template_kwargs,
):
"""Initialize a ModelCard from a template. By default, it uses the default template, which can be found here:
https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md
Templates are Jinja2 templates that can be customized by passing keyword arguments.
Args:
card_data (`huggingface_hub.ModelCardData`):
A huggingface_hub.ModelCardData instance containing the metadata you want to include in the YAML
header of the model card on the Hugging Face Hub.
template_path (`str`, *optional*):
A path to a markdown file with optional Jinja template variables that can be filled
in with `template_kwargs`. Defaults to the default template.
Returns:
[`huggingface_hub.ModelCard`]: A ModelCard instance with the specified card data and content from the
template.
Example:
```python
>>> from huggingface_hub import ModelCard, ModelCardData, EvalResult
>>> # Using the Default Template
>>> card_data = ModelCardData(
... language='en',
... license='mit',
... library_name='timm',
... tags=['image-classification', 'resnet'],
... datasets=['beans'],
... metrics=['accuracy'],
... )
>>> card = ModelCard.from_template(
... card_data,
... model_description='This model does x + y...'
... )
>>> # Including Evaluation Results
>>> card_data = ModelCardData(
... language='en',
... tags=['image-classification', 'resnet'],
... eval_results=[
... EvalResult(
... task_type='image-classification',
... dataset_type='beans',
... dataset_name='Beans',
... metric_type='accuracy',
... metric_value=0.9,
... ),
... ],
... model_name='my-cool-model',
... )
>>> card = ModelCard.from_template(card_data)
>>> # Using a Custom Template
>>> card_data = ModelCardData(
... language='en',
... tags=['image-classification', 'resnet']
... )
>>> card = ModelCard.from_template(
... card_data=card_data,
... template_path='./src/huggingface_hub/templates/modelcard_template.md',
... custom_template_var='custom value', # will be replaced in template if it exists
... )
```
"""
return super().from_template(card_data, template_path, template_str, **template_kwargs) | class_definition | 13,475 | 16,801 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repocard.py | null | 108 |
class DatasetCard(RepoCard):
card_data_class = DatasetCardData
default_template_path = TEMPLATE_DATASETCARD_PATH
repo_type = "dataset"
@classmethod
def from_template( # type: ignore # violates Liskov property but easier to use
cls,
card_data: DatasetCardData,
template_path: Optional[str] = None,
template_str: Optional[str] = None,
**template_kwargs,
):
"""Initialize a DatasetCard from a template. By default, it uses the default template, which can be found here:
https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md
Templates are Jinja2 templates that can be customized by passing keyword arguments.
Args:
card_data (`huggingface_hub.DatasetCardData`):
A huggingface_hub.DatasetCardData instance containing the metadata you want to include in the YAML
header of the dataset card on the Hugging Face Hub.
template_path (`str`, *optional*):
A path to a markdown file with optional Jinja template variables that can be filled
in with `template_kwargs`. Defaults to the default template.
Returns:
[`huggingface_hub.DatasetCard`]: A DatasetCard instance with the specified card data and content from the
template.
Example:
```python
>>> from huggingface_hub import DatasetCard, DatasetCardData
>>> # Using the Default Template
>>> card_data = DatasetCardData(
... language='en',
... license='mit',
... annotations_creators='crowdsourced',
... task_categories=['text-classification'],
... task_ids=['sentiment-classification', 'text-scoring'],
... multilinguality='monolingual',
... pretty_name='My Text Classification Dataset',
... )
>>> card = DatasetCard.from_template(
... card_data,
... pretty_name=card_data.pretty_name,
... )
>>> # Using a Custom Template
>>> card_data = DatasetCardData(
... language='en',
... license='mit',
... )
>>> card = DatasetCard.from_template(
... card_data=card_data,
... template_path='./src/huggingface_hub/templates/datasetcard_template.md',
... custom_template_var='custom value', # will be replaced in template if it exists
... )
```
"""
return super().from_template(card_data, template_path, template_str, **template_kwargs) | class_definition | 16,804 | 19,555 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repocard.py | null | 109 |
class SpaceCard(RepoCard):
card_data_class = SpaceCardData
default_template_path = TEMPLATE_MODELCARD_PATH
repo_type = "space" | class_definition | 19,558 | 19,696 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repocard.py | null | 110 |
class CommandInProgress:
"""
Utility to follow commands launched asynchronously.
"""
def __init__(
self,
title: str,
is_done_method: Callable,
status_method: Callable,
process: subprocess.Popen,
post_method: Optional[Callable] = None,
):
self.title = title
self._is_done = is_done_method
self._status = status_method
self._process = process
self._stderr = ""
self._stdout = ""
self._post_method = post_method
@property
def is_done(self) -> bool:
"""
Whether the process is done.
"""
result = self._is_done()
if result and self._post_method is not None:
self._post_method()
self._post_method = None
return result
@property
def status(self) -> int:
"""
The exit code/status of the current action. Will return `0` if the
command has completed successfully, and a number between 1 and 255 if
the process errored-out.
Will return -1 if the command is still ongoing.
"""
return self._status()
@property
def failed(self) -> bool:
"""
Whether the process errored-out.
"""
return self.status > 0
@property
def stderr(self) -> str:
"""
The current output message on the standard error.
"""
if self._process.stderr is not None:
self._stderr += self._process.stderr.read()
return self._stderr
@property
def stdout(self) -> str:
"""
The current output message on the standard output.
"""
if self._process.stdout is not None:
self._stdout += self._process.stdout.read()
return self._stdout
def __repr__(self):
status = self.status
if status == -1:
status = "running"
return (
f"[{self.title} command, status code: {status},"
f" {'in progress.' if not self.is_done else 'finished.'} PID:"
f" {self._process.pid}]"
) | class_definition | 697 | 2,821 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repository.py | null | 111 |
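A self-contained sketch of wrapping an arbitrary subprocess in `CommandInProgress`, mirroring how `Repository.git_push(blocking=False)` builds one further below; the demo command itself is arbitrary:

```python
import subprocess

# Assumes CommandInProgress above is in scope (huggingface_hub.repository).
process = subprocess.Popen(
    ["python", "-c", "print('done')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    encoding="utf-8",
)
command = CommandInProgress(
    "demo",
    is_done_method=lambda: process.poll() is not None,
    status_method=lambda: -1 if process.poll() is None else process.poll(),
    process=process,
)

print(command)          # shows the title, status ("running" while ongoing) and PID
print(command.is_done)  # False until the process has exited
print(command.stdout)   # accumulated standard output read so far
```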
class PbarT(TypedDict):
# Used to store an opened progress bar in `_lfs_log_progress`
bar: tqdm
past_bytes: int | class_definition | 8,877 | 9,000 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repository.py | null | 112 |
class Repository:
"""
Helper class to wrap the git and git-lfs commands.
The aim is to facilitate interacting with huggingface.co hosted model or
dataset repos, though not a lot here (if any) is actually specific to
huggingface.co.
<Tip warning={true}>
[`Repository`] is deprecated in favor of the http-based alternatives implemented in
[`HfApi`]. Given its large adoption in legacy code, the complete removal of
[`Repository`] will only happen in release `v1.0`. For more details, please read
https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http.
</Tip>
"""
command_queue: List[CommandInProgress]
@validate_hf_hub_args
@_deprecate_method(
version="1.0",
message=(
"Please prefer the http-based alternatives instead. Given its large adoption in legacy code, the complete"
" removal is only planned on next major release.\nFor more details, please read"
" https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http."
),
)
def __init__(
self,
local_dir: Union[str, Path],
clone_from: Optional[str] = None,
repo_type: Optional[str] = None,
token: Union[bool, str] = True,
git_user: Optional[str] = None,
git_email: Optional[str] = None,
revision: Optional[str] = None,
skip_lfs_files: bool = False,
client: Optional[HfApi] = None,
):
"""
Instantiate a local clone of a git repo.
If `clone_from` is set, the repo will be cloned from an existing remote repository.
If the remote repo does not exist, a `EnvironmentError` exception will be thrown.
Please create the remote repo first using [`create_repo`].
`Repository` uses the local git credentials by default. If explicitly set, the `token`
or the `git_user`/`git_email` pair will be used instead.
Args:
local_dir (`str` or `Path`):
path (e.g. `'my_trained_model/'`) to the local directory, where
the `Repository` will be initialized.
clone_from (`str`, *optional*):
Either a repository url or `repo_id`.
Example:
- `"https://huggingface.co/philschmid/playground-tests"`
- `"philschmid/playground-tests"`
repo_type (`str`, *optional*):
To set when cloning a repo from a repo_id. Default is model.
token (`bool` or `str`, *optional*):
A valid authentication token (see https://huggingface.co/settings/token).
If `None` or `True` and machine is logged in (through `huggingface-cli login`
or [`~huggingface_hub.login`]), token will be retrieved from the cache.
If `False`, token is not sent in the request header.
git_user (`str`, *optional*):
will override the `git config user.name` for committing and
pushing files to the hub.
git_email (`str`, *optional*):
will override the `git config user.email` for committing and
pushing files to the hub.
revision (`str`, *optional*):
Revision to checkout after initializing the repository. If the
revision doesn't exist, a branch will be created with that
revision name from the default branch's current HEAD.
skip_lfs_files (`bool`, *optional*, defaults to `False`):
whether to skip git-LFS files or not.
client (`HfApi`, *optional*):
Instance of [`HfApi`] to use when calling the HF Hub API. A new
instance will be created if this is left to `None`.
Raises:
[`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
If the remote repository set in `clone_from` does not exist.
"""
if isinstance(local_dir, Path):
local_dir = str(local_dir)
os.makedirs(local_dir, exist_ok=True)
self.local_dir = os.path.join(os.getcwd(), local_dir)
self._repo_type = repo_type
self.command_queue = []
self.skip_lfs_files = skip_lfs_files
self.client = client if client is not None else HfApi()
self.check_git_versions()
if isinstance(token, str):
self.huggingface_token: Optional[str] = token
elif token is False:
self.huggingface_token = None
else:
# if `True` -> explicit use of the cached token
# if `None` -> implicit use of the cached token
self.huggingface_token = get_token()
if clone_from is not None:
self.clone_from(repo_url=clone_from)
else:
if is_git_repo(self.local_dir):
logger.debug("[Repository] is a valid git repo")
else:
raise ValueError("If not specifying `clone_from`, you need to pass Repository a valid git clone.")
if self.huggingface_token is not None and (git_email is None or git_user is None):
user = self.client.whoami(self.huggingface_token)
if git_email is None:
git_email = user.get("email")
if git_user is None:
git_user = user.get("fullname")
if git_user is not None or git_email is not None:
self.git_config_username_and_email(git_user, git_email)
self.lfs_enable_largefiles()
self.git_credential_helper_store()
if revision is not None:
self.git_checkout(revision, create_branch_ok=True)
# This ensures that all commands exit before exiting the Python runtime.
# This will ensure all pushes register on the hub, even if other errors happen in subsequent operations.
atexit.register(self.wait_for_commands)
@property
def current_branch(self) -> str:
"""
Returns the current checked out branch.
Returns:
`str`: Current checked out branch.
"""
try:
result = run_subprocess("git rev-parse --abbrev-ref HEAD", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return result
def check_git_versions(self):
"""
Checks that `git` and `git-lfs` can be run.
Raises:
[`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
If `git` or `git-lfs` are not installed.
"""
try:
git_version = run_subprocess("git --version", self.local_dir).stdout.strip()
except FileNotFoundError:
raise EnvironmentError("Looks like you do not have git installed, please install.")
try:
lfs_version = run_subprocess("git-lfs --version", self.local_dir).stdout.strip()
except FileNotFoundError:
raise EnvironmentError(
"Looks like you do not have git-lfs installed, please install."
" You can install from https://git-lfs.github.com/."
" Then run `git lfs install` (you only have to do this once)."
)
logger.info(git_version + "\n" + lfs_version)
@validate_hf_hub_args
def clone_from(self, repo_url: str, token: Union[bool, str, None] = None):
"""
Clone from a remote. If the folder already exists, will try to clone the
repository within it.
If this folder is a git repository with linked history, will try to
update the repository.
Args:
repo_url (`str`):
The URL from which to clone the repository
token (`Union[str, bool]`, *optional*):
Whether to use the authentication token. It can be:
- a string which is the token itself
- `False`, which would not use the authentication token
- `True`, which would fetch the authentication token from the
local folder and use it (you should be logged in for this to
work).
- `None`, which would retrieve the value of
`self.huggingface_token`.
<Tip>
Raises the following error:
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
if an organization token (starts with "api_org") is passed. You must use
your own personal access token (see https://hf.co/settings/tokens).
- [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
if you are trying to clone the repository in a non-empty folder, or if the
`git` operations raise errors.
</Tip>
"""
token = (
token # str -> use it
if isinstance(token, str)
else (
None # `False` -> explicit no token
if token is False
else self.huggingface_token # `None` or `True` -> use default
)
)
if token is not None and token.startswith("api_org"):
raise ValueError(
"You must use your personal access token, not an Organization token"
" (see https://hf.co/settings/tokens)."
)
hub_url = self.client.endpoint
if hub_url in repo_url or ("http" not in repo_url and len(repo_url.split("/")) <= 2):
repo_type, namespace, repo_name = repo_type_and_id_from_hf_id(repo_url, hub_url=hub_url)
repo_id = f"{namespace}/{repo_name}" if namespace is not None else repo_name
if repo_type is not None:
self._repo_type = repo_type
repo_url = hub_url + "/"
if self._repo_type in constants.REPO_TYPES_URL_PREFIXES:
repo_url += constants.REPO_TYPES_URL_PREFIXES[self._repo_type]
if token is not None:
# Add token in git url when provided
scheme = urlparse(repo_url).scheme
repo_url = repo_url.replace(f"{scheme}://", f"{scheme}://user:{token}@")
repo_url += repo_id
# For error messages, it's cleaner to show the repo url without the token.
clean_repo_url = re.sub(r"(https?)://.*@", r"\1://", repo_url)
try:
run_subprocess("git lfs install", self.local_dir)
# Check whether we are cloning into an empty directory or one that already contains files
if len(os.listdir(self.local_dir)) == 0:
logger.warning(f"Cloning {clean_repo_url} into local empty directory.")
with _lfs_log_progress():
env = os.environ.copy()
if self.skip_lfs_files:
env.update({"GIT_LFS_SKIP_SMUDGE": "1"})
run_subprocess(
# 'git lfs clone' is deprecated (will display a warning in the terminal)
# but we still use it as it provides a nicer UX when downloading large
# files (shows progress).
f"{'git clone' if self.skip_lfs_files else 'git lfs clone'} {repo_url} .",
self.local_dir,
env=env,
)
else:
# Check if the folder is the root of a git repository
if not is_git_repo(self.local_dir):
raise EnvironmentError(
"Tried to clone a repository in a non-empty folder that isn't"
f" a git repository ('{self.local_dir}'). If you really want to"
f" do this, do it manually:\n cd {self.local_dir} && git init"
" && git remote add origin && git pull origin main\n or clone"
" repo to a new folder and move your existing files there"
" afterwards."
)
if is_local_clone(self.local_dir, repo_url):
logger.warning(
f"{self.local_dir} is already a clone of {clean_repo_url}."
" Make sure you pull the latest changes with"
" `repo.git_pull()`."
)
else:
output = run_subprocess("git remote get-url origin", self.local_dir, check=False)
error_msg = (
f"Tried to clone {clean_repo_url} in an unrelated git"
" repository.\nIf you believe this is an error, please add"
f" a remote with the following URL: {clean_repo_url}."
)
if output.returncode == 0:
clean_local_remote_url = re.sub(r"https://.*@", "https://", output.stdout)
error_msg += f"\nLocal path has its origin defined as: {clean_local_remote_url}"
raise EnvironmentError(error_msg)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_config_username_and_email(self, git_user: Optional[str] = None, git_email: Optional[str] = None):
"""
Sets git username and email (only in the current repo).
Args:
git_user (`str`, *optional*):
The username to register through `git`.
git_email (`str`, *optional*):
The email to register through `git`.
"""
try:
if git_user is not None:
run_subprocess("git config user.name".split() + [git_user], self.local_dir)
if git_email is not None:
run_subprocess(f"git config user.email {git_email}".split(), self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_credential_helper_store(self):
"""
Sets the git credential helper to `store`
"""
try:
run_subprocess("git config credential.helper store", self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_hash(self) -> str:
"""
Get commit sha on top of HEAD.
Returns:
`str`: The current checked out commit SHA.
"""
try:
p = run_subprocess("git rev-parse HEAD", self.local_dir)
return p.stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_remote_url(self) -> str:
"""
Get URL to origin remote.
Returns:
`str`: The URL of the `origin` remote.
"""
try:
p = run_subprocess("git config --get remote.origin.url", self.local_dir)
url = p.stdout.strip()
# Strip basic auth info.
return re.sub(r"https://.*@", "https://", url)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_head_commit_url(self) -> str:
"""
Get URL to last commit on HEAD. We assume it's been pushed, and the url
scheme is the same one as for GitHub or HuggingFace.
Returns:
`str`: The URL to the current checked-out commit.
"""
sha = self.git_head_hash()
url = self.git_remote_url()
if url.endswith("/"):
url = url[:-1]
return f"{url}/commit/{sha}"
def list_deleted_files(self) -> List[str]:
"""
Returns a list of the files that are deleted in the working directory or
index.
Returns:
`List[str]`: A list of files that have been deleted in the working
directory or index.
"""
try:
git_status = run_subprocess("git status -s", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if len(git_status) == 0:
return []
# Receives a status like the following
# D .gitignore
# D new_file.json
# AD new_file1.json
# ?? new_file2.json
# ?? new_file4.json
# Strip each line of whitespaces
modified_files_statuses = [status.strip() for status in git_status.split("\n")]
# Only keep files that are deleted using the D prefix
deleted_files_statuses = [status for status in modified_files_statuses if "D" in status.split()[0]]
# Remove the D prefix and strip to keep only the relevant filename
deleted_files = [status.split()[-1].strip() for status in deleted_files_statuses]
return deleted_files
def lfs_track(self, patterns: Union[str, List[str]], filename: bool = False):
"""
Tell git-lfs to track files according to a pattern.
Setting the `filename` argument to `True` will treat the arguments as
literal filenames, not as patterns. Any special glob characters in the
filename will be escaped when writing to the `.gitattributes` file.
Args:
patterns (`Union[str, List[str]]`):
The pattern, or list of patterns, to track with git-lfs.
filename (`bool`, *optional*, defaults to `False`):
Whether to use the patterns as literal filenames.
"""
if isinstance(patterns, str):
patterns = [patterns]
try:
for pattern in patterns:
run_subprocess(
f"git lfs track {'--filename' if filename else ''} {pattern}",
self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def lfs_untrack(self, patterns: Union[str, List[str]]):
"""
Tell git-lfs to untrack those files.
Args:
patterns (`Union[str, List[str]]`):
The pattern, or list of patterns, to untrack with git-lfs.
"""
if isinstance(patterns, str):
patterns = [patterns]
try:
for pattern in patterns:
run_subprocess("git lfs untrack".split() + [pattern], self.local_dir)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def lfs_enable_largefiles(self):
"""
HF-specific. This enables upload support of files >5GB.
"""
try:
lfs_config = "git config lfs.customtransfer.multipart"
run_subprocess(f"{lfs_config}.path huggingface-cli", self.local_dir)
run_subprocess(
f"{lfs_config}.args {LFS_MULTIPART_UPLOAD_COMMAND}",
self.local_dir,
)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def auto_track_binary_files(self, pattern: str = ".") -> List[str]:
"""
Automatically track binary files with git-lfs.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to track files that are binary.
Returns:
`List[str]`: List of filenames that are now tracked due to being
binary files
"""
files_to_be_tracked_with_lfs = []
deleted_files = self.list_deleted_files()
for filename in files_to_be_staged(pattern, folder=self.local_dir):
if filename in deleted_files:
continue
path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
if not (is_tracked_with_lfs(path_to_file) or is_git_ignored(path_to_file)):
size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
if size_in_mb >= 10:
logger.warning(
"Parsing a large file to check if binary or not. Tracking large"
" files using `repository.auto_track_large_files` is"
" recommended so as to not load the full file in memory."
)
is_binary = is_binary_file(path_to_file)
if is_binary:
self.lfs_track(filename)
files_to_be_tracked_with_lfs.append(filename)
# Cleanup the .gitattributes if files were deleted
self.lfs_untrack(deleted_files)
return files_to_be_tracked_with_lfs
def auto_track_large_files(self, pattern: str = ".") -> List[str]:
"""
Automatically track large files (files that weigh more than 10MBs) with
git-lfs.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to track files that are above 10MBs.
Returns:
`List[str]`: List of filenames that are now tracked due to their
size.
"""
files_to_be_tracked_with_lfs = []
deleted_files = self.list_deleted_files()
for filename in files_to_be_staged(pattern, folder=self.local_dir):
if filename in deleted_files:
continue
path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
if size_in_mb >= 10 and not is_tracked_with_lfs(path_to_file) and not is_git_ignored(path_to_file):
self.lfs_track(filename)
files_to_be_tracked_with_lfs.append(filename)
# Cleanup the .gitattributes if files were deleted
self.lfs_untrack(deleted_files)
return files_to_be_tracked_with_lfs
def lfs_prune(self, recent=False):
"""
git lfs prune
Args:
recent (`bool`, *optional*, defaults to `False`):
Whether to prune files even if they were referenced by recent
commits. See the following
[link](https://github.com/git-lfs/git-lfs/blob/f3d43f0428a84fc4f1e5405b76b5a73ec2437e65/docs/man/git-lfs-prune.1.ronn#recent-files)
for more information.
"""
try:
with _lfs_log_progress():
result = run_subprocess(f"git lfs prune {'--recent' if recent else ''}", self.local_dir)
logger.info(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_pull(self, rebase: bool = False, lfs: bool = False):
"""
git pull
Args:
rebase (`bool`, *optional*, defaults to `False`):
Whether to rebase the current branch on top of the upstream
branch after fetching.
lfs (`bool`, *optional*, defaults to `False`):
Whether to fetch the LFS files too. This option only changes the
behavior when a repository was cloned without fetching the LFS
files; calling `repo.git_pull(lfs=True)` will then fetch the LFS
file from the remote repository.
"""
command = "git pull" if not lfs else "git lfs pull"
if rebase:
command += " --rebase"
try:
with _lfs_log_progress():
result = run_subprocess(command, self.local_dir)
logger.info(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_add(self, pattern: str = ".", auto_lfs_track: bool = False):
"""
git add
Setting the `auto_lfs_track` parameter to `True` will automatically
track files that are larger than 10MB with `git-lfs`.
Args:
pattern (`str`, *optional*, defaults to "."):
The pattern with which to add files to staging.
auto_lfs_track (`bool`, *optional*, defaults to `False`):
Whether to automatically track large and binary files with
git-lfs. Any file over 10MB in size, or in binary format, will
be automatically tracked.
"""
if auto_lfs_track:
# Track files according to their size (>=10MB)
tracked_files = self.auto_track_large_files(pattern)
# Read the remaining files and track them if they're binary
tracked_files.extend(self.auto_track_binary_files(pattern))
if tracked_files:
logger.warning(
f"Adding files tracked by Git LFS: {tracked_files}. This may take a"
" bit of time if the files are large."
)
try:
result = run_subprocess("git add -v".split() + [pattern], self.local_dir)
logger.info(f"Adding to index:\n{result.stdout}\n")
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def git_commit(self, commit_message: str = "commit files to HF hub"):
"""
git commit
Args:
commit_message (`str`, *optional*, defaults to "commit files to HF hub"):
The message attributed to the commit.
"""
try:
result = run_subprocess("git commit -v -m".split() + [commit_message], self.local_dir)
logger.info(f"Committed:\n{result.stdout}\n")
except subprocess.CalledProcessError as exc:
if len(exc.stderr) > 0:
raise EnvironmentError(exc.stderr)
else:
raise EnvironmentError(exc.stdout)
def git_push(
self,
upstream: Optional[str] = None,
blocking: bool = True,
auto_lfs_prune: bool = False,
) -> Union[str, Tuple[str, CommandInProgress]]:
"""
git push
If used without setting `blocking`, will return url to commit on remote
repo. If used with `blocking=True`, will return a tuple containing the
url to commit and the command object to follow for information about the
process.
Args:
upstream (`str`, *optional*):
Upstream to which this should push. If not specified, will push
to the lastly defined upstream or to the default one (`origin
main`).
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the push has
finished. Setting this to `False` will return an
`CommandInProgress` object which has an `is_done` property. This
property will be set to `True` when the push is finished.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
"""
command = "git push"
if upstream:
command += f" --set-upstream {upstream}"
number_of_commits = commits_to_push(self.local_dir, upstream)
if number_of_commits > 1:
logger.warning(f"Several commits ({number_of_commits}) will be pushed upstream.")
if blocking:
logger.warning("The progress bars may be unreliable.")
try:
with _lfs_log_progress():
process = subprocess.Popen(
command.split(),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding="utf-8",
cwd=self.local_dir,
)
if blocking:
stdout, stderr = process.communicate()
return_code = process.poll()
process.kill()
if len(stderr):
logger.warning(stderr)
if return_code:
raise subprocess.CalledProcessError(return_code, process.args, output=stdout, stderr=stderr)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if not blocking:
def status_method():
status = process.poll()
if status is None:
return -1
else:
return status
command_in_progress = CommandInProgress(
"push",
is_done_method=lambda: process.poll() is not None,
status_method=status_method,
process=process,
post_method=self.lfs_prune if auto_lfs_prune else None,
)
self.command_queue.append(command_in_progress)
return self.git_head_commit_url(), command_in_progress
if auto_lfs_prune:
self.lfs_prune()
return self.git_head_commit_url()
def git_checkout(self, revision: str, create_branch_ok: bool = False):
"""
git checkout a given revision
Specifying `create_branch_ok` to `True` will create the branch to the
given revision if that revision doesn't exist.
Args:
revision (`str`):
The revision to checkout.
create_branch_ok (`bool`, *optional*, defaults to `False`):
Whether to create a branch named `revision` from the current
checked-out reference if `revision` is not an existing
revision.
"""
try:
result = run_subprocess(f"git checkout {revision}", self.local_dir)
logger.warning(f"Checked out {revision} from {self.current_branch}.")
logger.warning(result.stdout)
except subprocess.CalledProcessError as exc:
if not create_branch_ok:
raise EnvironmentError(exc.stderr)
else:
try:
result = run_subprocess(f"git checkout -b {revision}", self.local_dir)
logger.warning(
f"Revision `{revision}` does not exist. Created and checked out branch `{revision}`."
)
logger.warning(result.stdout)
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def tag_exists(self, tag_name: str, remote: Optional[str] = None) -> bool:
"""
Check if a tag exists or not.
Args:
tag_name (`str`):
The name of the tag to check.
remote (`str`, *optional*):
The identifier of the remote on which to check whether the tag
exists. If not provided, the tag is looked up locally.
Returns:
`bool`: Whether the tag exists.
"""
if remote:
try:
result = run_subprocess(f"git ls-remote origin refs/tags/{tag_name}", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return len(result) != 0
else:
try:
git_tags = run_subprocess("git tag", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
git_tags = git_tags.split("\n")
return tag_name in git_tags
def delete_tag(self, tag_name: str, remote: Optional[str] = None) -> bool:
"""
Delete a tag, both local and remote, if it exists
Args:
tag_name (`str`):
The tag name to delete.
remote (`str`, *optional*):
The remote on which to delete the tag.
Returns:
`bool`: `True` if deleted, `False` if the tag didn't exist.
If `remote` is not passed, the tag is only deleted locally.
"""
delete_locally = True
delete_remotely = True
if not self.tag_exists(tag_name):
delete_locally = False
if not self.tag_exists(tag_name, remote=remote):
delete_remotely = False
if delete_locally:
try:
run_subprocess(["git", "tag", "-d", tag_name], self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if remote and delete_remotely:
try:
run_subprocess(f"git push {remote} --delete {tag_name}", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return True
def add_tag(self, tag_name: str, message: Optional[str] = None, remote: Optional[str] = None):
"""
Add a tag at the current head and push it
If remote is None, will just be updated locally
If no message is provided, the tag will be lightweight. If a message is
provided, the tag will be annotated.
Args:
tag_name (`str`):
The name of the tag to be added.
message (`str`, *optional*):
The message that accompanies the tag. The tag will turn into an
annotated tag if a message is passed.
remote (`str`, *optional*):
The remote on which to add the tag.
"""
if message:
tag_args = ["git", "tag", "-a", tag_name, "-m", message]
else:
tag_args = ["git", "tag", tag_name]
try:
run_subprocess(tag_args, self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
if remote:
try:
run_subprocess(f"git push {remote} {tag_name}", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
def is_repo_clean(self) -> bool:
"""
Return whether the git status is clean.
Returns:
`bool`: `True` if the git status is clean, `False` otherwise.
"""
try:
git_status = run_subprocess("git status --porcelain", self.local_dir).stdout.strip()
except subprocess.CalledProcessError as exc:
raise EnvironmentError(exc.stderr)
return len(git_status) == 0
def push_to_hub(
self,
commit_message: str = "commit files to HF hub",
blocking: bool = True,
clean_ok: bool = True,
auto_lfs_prune: bool = False,
) -> Union[None, str, Tuple[str, CommandInProgress]]:
"""
Helper to add, commit, and push files to remote repository on the
HuggingFace Hub. Will automatically track large files (>10MB).
Args:
commit_message (`str`):
Message to use for the commit.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has
finished.
clean_ok (`bool`, *optional*, defaults to `True`):
If True, this function will return None if the repo is
untouched. Default behavior is to fail because the git command
fails.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
"""
if clean_ok and self.is_repo_clean():
logger.info("Repo currently clean. Ignoring push_to_hub")
return None
self.git_add(auto_lfs_track=True)
self.git_commit(commit_message)
return self.git_push(
upstream=f"origin {self.current_branch}",
blocking=blocking,
auto_lfs_prune=auto_lfs_prune,
)
@contextmanager
def commit(
self,
commit_message: str,
branch: Optional[str] = None,
track_large_files: bool = True,
blocking: bool = True,
auto_lfs_prune: bool = False,
):
"""
Context manager utility to handle committing to a repository. This
automatically tracks large files (>10Mb) with git-lfs. Set the
`track_large_files` argument to `False` if you wish to ignore that
behavior.
Args:
commit_message (`str`):
Message to use for the commit.
branch (`str`, *optional*):
The branch on which the commit will appear. This branch will be
checked-out before any operation.
track_large_files (`bool`, *optional*, defaults to `True`):
Whether to automatically track large files or not. Will do so by
default.
blocking (`bool`, *optional*, defaults to `True`):
Whether the function should return only when the `git push` has
finished.
auto_lfs_prune (`bool`, *optional*, defaults to `False`):
Whether to automatically prune files once they have been pushed
to the remote.
Examples:
```python
>>> with Repository(
... "text-files",
... clone_from="<user>/text-files",
... token=True,
>>> ).commit("My first file :)"):
... with open("file.txt", "w+") as f:
... f.write(json.dumps({"hey": 8}))
>>> import torch
>>> model = torch.nn.Transformer()
>>> with Repository(
... "torch-model",
... clone_from="<user>/torch-model",
... token=True,
>>> ).commit("My cool model :)"):
... torch.save(model.state_dict(), "model.pt")
```
"""
files_to_stage = files_to_be_staged(".", folder=self.local_dir)
if len(files_to_stage):
files_in_msg = str(files_to_stage[:5])[:-1] + ", ...]" if len(files_to_stage) > 5 else str(files_to_stage)
logger.error(
"There exists some updated files in the local repository that are not"
f" committed: {files_in_msg}. This may lead to errors if checking out"
" a branch. These files and their modifications will be added to the"
" current commit."
)
if branch is not None:
self.git_checkout(branch, create_branch_ok=True)
if is_tracked_upstream(self.local_dir):
logger.warning("Pulling changes ...")
self.git_pull(rebase=True)
else:
logger.warning(f"The current branch has no upstream branch. Will push to 'origin {self.current_branch}'")
current_working_directory = os.getcwd()
os.chdir(os.path.join(current_working_directory, self.local_dir))
try:
yield self
finally:
self.git_add(auto_lfs_track=track_large_files)
try:
self.git_commit(commit_message)
except OSError as e:
# If no changes are detected, there is nothing to commit.
if "nothing to commit" not in str(e):
raise e
try:
self.git_push(
upstream=f"origin {self.current_branch}",
blocking=blocking,
auto_lfs_prune=auto_lfs_prune,
)
except OSError as e:
# If no changes are detected, there is nothing to commit.
if "could not read Username" in str(e):
raise OSError("Couldn't authenticate user for push. Did you set `token` to `True`?") from e
else:
raise e
os.chdir(current_working_directory)
def repocard_metadata_load(self) -> Optional[Dict]:
filepath = os.path.join(self.local_dir, constants.REPOCARD_NAME)
if os.path.isfile(filepath):
return metadata_load(filepath)
return None
def repocard_metadata_save(self, data: Dict) -> None:
return metadata_save(os.path.join(self.local_dir, constants.REPOCARD_NAME), data)
@property
def commands_failed(self):
"""
Returns the asynchronous commands that failed.
"""
return [c for c in self.command_queue if c.status > 0]
@property
def commands_in_progress(self):
"""
Returns the asynchronous commands that are currently in progress.
"""
return [c for c in self.command_queue if not c.is_done]
def wait_for_commands(self):
"""
Blocking method: blocks all subsequent execution until all commands have
been processed.
"""
index = 0
for command_failed in self.commands_failed:
logger.error(f"The {command_failed.title} command with PID {command_failed._process.pid} failed.")
logger.error(command_failed.stderr)
while self.commands_in_progress:
if index % 10 == 0:
logger.warning(
f"Waiting for the following commands to finish before shutting down: {self.commands_in_progress}."
)
index += 1
time.sleep(1) | class_definition | 13,072 | 54,556 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/repository.py | null | 113 |
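A hedged usage sketch tying together the `Repository` methods documented above; the repo id is illustrative, `git` and `git-lfs` must be installed, and the class is deprecated in favor of the HTTP-based `HfApi` alternatives:

```python
from huggingface_hub import Repository

# Clone (or reuse) a local checkout of a Hub repo.
repo = Repository(
    local_dir="my-model",             # local folder for the clone
    clone_from="username/my-model",   # illustrative repo id
    token=True,                       # use the locally cached token
)

# commit() checks out the branch, then adds, commits and pushes everything
# written inside the block, auto-tracking large files with git-lfs.
with repo.commit("Add training config"):
    with open("config.json", "w") as f:
        f.write('{"hidden_size": 768}')
```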
class HFSummaryWriter(SummaryWriter):
"""
Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub.
Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate
thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection
issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`
minutes (default to every 5 minutes).
<Tip warning={true}>
`HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.
</Tip>
Args:
repo_id (`str`):
The id of the repo to which the logs will be pushed.
logdir (`str`, *optional*):
The directory where the logs will be written. If not specified, a local directory will be created by the
underlying `SummaryWriter` object.
commit_every (`int` or `float`, *optional*):
The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.
squash_history (`bool`, *optional*):
Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
useful to avoid degraded performance on the repo when it grows too large.
repo_type (`str`, *optional*):
The type of the repo to which the logs will be pushed. Defaults to "model".
repo_revision (`str`, *optional*):
The revision of the repo to which the logs will be pushed. Defaults to "main".
repo_private (`bool`, *optional*):
Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
path_in_repo (`str`, *optional*):
The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".
repo_allow_patterns (`List[str]` or `str`, *optional*):
A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the
[upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
repo_ignore_patterns (`List[str]` or `str`, *optional*):
A list of patterns to exclude in the upload. Check out the
[upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
token (`str`, *optional*):
Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
details
kwargs:
Additional keyword arguments passed to `SummaryWriter`.
Examples:
```diff
# Taken from https://pytorch.org/docs/stable/tensorboard.html
- from torch.utils.tensorboard import SummaryWriter
+ from huggingface_hub import HFSummaryWriter
import numpy as np
- writer = SummaryWriter()
+ writer = HFSummaryWriter(repo_id="username/my-trained-model")
for n_iter in range(100):
writer.add_scalar('Loss/train', np.random.random(), n_iter)
writer.add_scalar('Loss/test', np.random.random(), n_iter)
writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
```
```py
>>> from huggingface_hub import HFSummaryWriter
# Logs are automatically pushed every 15 minutes (5 by default) + when exiting the context manager
>>> with HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) as logger:
... logger.add_scalar("a", 1)
... logger.add_scalar("b", 2)
```
"""
@experimental
def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":
if not is_summary_writer_available:
raise ImportError(
"You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"
" tensorboardX` first."
)
return super().__new__(cls)
def __init__(
self,
repo_id: str,
*,
logdir: Optional[str] = None,
commit_every: Union[int, float] = 5,
squash_history: bool = False,
repo_type: Optional[str] = None,
repo_revision: Optional[str] = None,
repo_private: Optional[bool] = None,
path_in_repo: Optional[str] = "tensorboard",
repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",
repo_ignore_patterns: Optional[Union[List[str], str]] = None,
token: Optional[str] = None,
**kwargs,
):
# Initialize SummaryWriter
super().__init__(logdir=logdir, **kwargs)
# Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.
if not isinstance(self.logdir, str):
raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.")
# Append logdir name to `path_in_repo`
if path_in_repo is None or path_in_repo == "":
path_in_repo = Path(self.logdir).name
else:
path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name
# Initialize scheduler
self.scheduler = CommitScheduler(
folder_path=self.logdir,
path_in_repo=path_in_repo,
repo_id=repo_id,
repo_type=repo_type,
revision=repo_revision,
private=repo_private,
token=token,
allow_patterns=repo_allow_patterns,
ignore_patterns=repo_ignore_patterns,
every=commit_every,
squash_history=squash_history,
)
# Exposing some high-level info at root level
self.repo_id = self.scheduler.repo_id
self.repo_type = self.scheduler.repo_type
self.repo_revision = self.scheduler.revision
# Add `hf-summary-writer` tag to the model card metadata
try:
card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type)
except EntryNotFoundError:
card = ModelCard("")
tags = card.data.get("tags", [])
if "hf-summary-writer" not in tags:
tags.append("hf-summary-writer")
card.data["tags"] = tags
card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type)
def __exit__(self, exc_type, exc_val, exc_tb):
"""Push to hub in a non-blocking way when exiting the logger's context manager."""
super().__exit__(exc_type, exc_val, exc_tb)
future = self.scheduler.trigger()
future.result() | class_definition | 1,565 | 8,357 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_tensorboard_logger.py | null | 114 |
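Beyond the scheduled pushes, the underlying `CommitScheduler` is exposed as `self.scheduler`, so a push can also be forced manually. A minimal sketch, assuming write access to a hypothetical `username/my-trained-model` repo:

```python
from huggingface_hub import HFSummaryWriter

writer = HFSummaryWriter(repo_id="username/my-trained-model", commit_every=5)
writer.add_scalar("loss", 0.42, 1)

# Force an immediate, non-blocking push instead of waiting for the next scheduled one
future = writer.scheduler.trigger()
future.result()  # optionally block until the upload has finished
```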
class HfFileSystemResolvedPath:
"""Data structure containing information about a resolved Hugging Face file system path."""
repo_type: str
repo_id: str
revision: str
path_in_repo: str
# The part placed after '@' in the initial path. It can be a quoted or unquoted refs revision.
# Used to reconstruct the unresolved path to return to the user.
_raw_revision: Optional[str] = field(default=None, repr=False)
def unresolve(self) -> str:
repo_path = constants.REPO_TYPES_URL_PREFIXES.get(self.repo_type, "") + self.repo_id
if self._raw_revision:
return f"{repo_path}@{self._raw_revision}/{self.path_in_repo}".rstrip("/")
elif self.revision != constants.DEFAULT_REVISION:
return f"{repo_path}@{safe_revision(self.revision)}/{self.path_in_repo}".rstrip("/")
else:
return f"{repo_path}/{self.path_in_repo}".rstrip("/") | class_definition | 1,095 | 2,013 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/hf_file_system.py | null | 115 |
class HfFileSystem(fsspec.AbstractFileSystem):
"""
    Access a remote Hugging Face Hub repository as if it were a local file system.
<Tip warning={true}>
[`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
</Tip>
Args:
token (`str` or `bool`, *optional*):
A valid user access token (string). Defaults to the locally saved
token, which is the recommended method for authentication (see
https://huggingface.co/docs/huggingface_hub/quick-start#authentication).
To disable authentication, pass `False`.
endpoint (`str`, *optional*):
Endpoint of the Hub. Defaults to <https://huggingface.co>.
Usage:
```python
>>> from huggingface_hub import HfFileSystem
>>> fs = HfFileSystem()
>>> # List files
>>> fs.glob("my-username/my-model/*.bin")
['my-username/my-model/pytorch_model.bin']
>>> fs.ls("datasets/my-username/my-dataset", detail=False)
['datasets/my-username/my-dataset/.gitattributes', 'datasets/my-username/my-dataset/README.md', 'datasets/my-username/my-dataset/data.json']
>>> # Read/write files
>>> with fs.open("my-username/my-model/pytorch_model.bin") as f:
... data = f.read()
>>> with fs.open("my-username/my-model/pytorch_model.bin", "wb") as f:
... f.write(data)
```
"""
root_marker = ""
protocol = "hf"
def __init__(
self,
*args,
endpoint: Optional[str] = None,
token: Union[bool, str, None] = None,
**storage_options,
):
super().__init__(*args, **storage_options)
self.endpoint = endpoint or constants.ENDPOINT
self.token = token
self._api = HfApi(endpoint=endpoint, token=token)
# Maps (repo_type, repo_id, revision) to a 2-tuple with:
        # * the 1st element indicating whether the repository and the revision exist
# * the 2nd element being the exception raised if the repository or revision doesn't exist
self._repo_and_revision_exists_cache: Dict[
Tuple[str, str, Optional[str]], Tuple[bool, Optional[Exception]]
] = {}
def _repo_and_revision_exist(
self, repo_type: str, repo_id: str, revision: Optional[str]
) -> Tuple[bool, Optional[Exception]]:
if (repo_type, repo_id, revision) not in self._repo_and_revision_exists_cache:
try:
self._api.repo_info(
repo_id, revision=revision, repo_type=repo_type, timeout=constants.HF_HUB_ETAG_TIMEOUT
)
except (RepositoryNotFoundError, HFValidationError) as e:
self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = False, e
self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = False, e
except RevisionNotFoundError as e:
self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = False, e
self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = True, None
else:
self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = True, None
self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = True, None
return self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)]
def resolve_path(self, path: str, revision: Optional[str] = None) -> HfFileSystemResolvedPath:
"""
Resolve a Hugging Face file system path into its components.
Args:
path (`str`):
Path to resolve.
revision (`str`, *optional*):
The revision of the repo to resolve. Defaults to the revision specified in the path.
Returns:
[`HfFileSystemResolvedPath`]: Resolved path information containing `repo_type`, `repo_id`, `revision` and `path_in_repo`.
Raises:
`ValueError`:
If path contains conflicting revision information.
`NotImplementedError`:
If trying to list repositories.
"""
def _align_revision_in_path_with_revision(
revision_in_path: Optional[str], revision: Optional[str]
) -> Optional[str]:
if revision is not None:
if revision_in_path is not None and revision_in_path != revision:
raise ValueError(
f'Revision specified in path ("{revision_in_path}") and in `revision` argument ("{revision}")'
" are not the same."
)
else:
revision = revision_in_path
return revision
path = self._strip_protocol(path)
if not path:
# can't list repositories at root
raise NotImplementedError("Access to repositories lists is not implemented.")
elif path.split("/")[0] + "/" in constants.REPO_TYPES_URL_PREFIXES.values():
if "/" not in path:
# can't list repositories at the repository type level
raise NotImplementedError("Access to repositories lists is not implemented.")
repo_type, path = path.split("/", 1)
repo_type = constants.REPO_TYPES_MAPPING[repo_type]
else:
repo_type = constants.REPO_TYPE_MODEL
if path.count("/") > 0:
if "@" in path:
repo_id, revision_in_path = path.split("@", 1)
if "/" in revision_in_path:
match = SPECIAL_REFS_REVISION_REGEX.search(revision_in_path)
if match is not None and revision in (None, match.group()):
# Handle `refs/convert/parquet` and PR revisions separately
path_in_repo = SPECIAL_REFS_REVISION_REGEX.sub("", revision_in_path).lstrip("/")
revision_in_path = match.group()
else:
revision_in_path, path_in_repo = revision_in_path.split("/", 1)
else:
path_in_repo = ""
revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision)
repo_and_revision_exist, err = self._repo_and_revision_exist(repo_type, repo_id, revision)
if not repo_and_revision_exist:
_raise_file_not_found(path, err)
else:
revision_in_path = None
repo_id_with_namespace = "/".join(path.split("/")[:2])
path_in_repo_with_namespace = "/".join(path.split("/")[2:])
repo_id_without_namespace = path.split("/")[0]
path_in_repo_without_namespace = "/".join(path.split("/")[1:])
repo_id = repo_id_with_namespace
path_in_repo = path_in_repo_with_namespace
repo_and_revision_exist, err = self._repo_and_revision_exist(repo_type, repo_id, revision)
if not repo_and_revision_exist:
if isinstance(err, (RepositoryNotFoundError, HFValidationError)):
repo_id = repo_id_without_namespace
path_in_repo = path_in_repo_without_namespace
repo_and_revision_exist, _ = self._repo_and_revision_exist(repo_type, repo_id, revision)
if not repo_and_revision_exist:
_raise_file_not_found(path, err)
else:
_raise_file_not_found(path, err)
else:
repo_id = path
path_in_repo = ""
if "@" in path:
repo_id, revision_in_path = path.split("@", 1)
revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision)
else:
revision_in_path = None
repo_and_revision_exist, _ = self._repo_and_revision_exist(repo_type, repo_id, revision)
if not repo_and_revision_exist:
raise NotImplementedError("Access to repositories lists is not implemented.")
revision = revision if revision is not None else constants.DEFAULT_REVISION
return HfFileSystemResolvedPath(repo_type, repo_id, revision, path_in_repo, _raw_revision=revision_in_path)
def invalidate_cache(self, path: Optional[str] = None) -> None:
"""
Clear the cache for a given path.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.invalidate_cache).
Args:
path (`str`, *optional*):
Path to clear from cache. If not provided, clear the entire cache.
"""
if not path:
self.dircache.clear()
self._repo_and_revision_exists_cache.clear()
else:
resolved_path = self.resolve_path(path)
path = resolved_path.unresolve()
while path:
self.dircache.pop(path, None)
path = self._parent(path)
# Only clear repo cache if path is to repo root
if not resolved_path.path_in_repo:
self._repo_and_revision_exists_cache.pop((resolved_path.repo_type, resolved_path.repo_id, None), None)
self._repo_and_revision_exists_cache.pop(
(resolved_path.repo_type, resolved_path.repo_id, resolved_path.revision), None
)
def _open(
self,
path: str,
mode: str = "rb",
revision: Optional[str] = None,
block_size: Optional[int] = None,
**kwargs,
) -> "HfFileSystemFile":
if "a" in mode:
raise NotImplementedError("Appending to remote files is not yet supported.")
if block_size == 0:
return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
else:
return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
def _rm(self, path: str, revision: Optional[str] = None, **kwargs) -> None:
resolved_path = self.resolve_path(path, revision=revision)
self._api.delete_file(
path_in_repo=resolved_path.path_in_repo,
repo_id=resolved_path.repo_id,
token=self.token,
repo_type=resolved_path.repo_type,
revision=resolved_path.revision,
commit_message=kwargs.get("commit_message"),
commit_description=kwargs.get("commit_description"),
)
self.invalidate_cache(path=resolved_path.unresolve())
def rm(
self,
path: str,
recursive: bool = False,
maxdepth: Optional[int] = None,
revision: Optional[str] = None,
**kwargs,
) -> None:
"""
Delete files from a repository.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.rm).
<Tip warning={true}>
Note: When possible, use `HfApi.delete_file()` for better performance.
</Tip>
Args:
path (`str`):
Path to delete.
recursive (`bool`, *optional*):
If True, delete directory and all its contents. Defaults to False.
maxdepth (`int`, *optional*):
Maximum number of subdirectories to visit when deleting recursively.
revision (`str`, *optional*):
The git revision to delete from.
"""
resolved_path = self.resolve_path(path, revision=revision)
paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth, revision=revision)
paths_in_repo = [self.resolve_path(path).path_in_repo for path in paths if not self.isdir(path)]
operations = [CommitOperationDelete(path_in_repo=path_in_repo) for path_in_repo in paths_in_repo]
commit_message = f"Delete {path} "
commit_message += "recursively " if recursive else ""
commit_message += f"up to depth {maxdepth} " if maxdepth is not None else ""
# TODO: use `commit_description` to list all the deleted paths?
self._api.create_commit(
repo_id=resolved_path.repo_id,
repo_type=resolved_path.repo_type,
token=self.token,
operations=operations,
revision=resolved_path.revision,
commit_message=kwargs.get("commit_message", commit_message),
commit_description=kwargs.get("commit_description"),
)
self.invalidate_cache(path=resolved_path.unresolve())
def ls(
self, path: str, detail: bool = True, refresh: bool = False, revision: Optional[str] = None, **kwargs
) -> List[Union[str, Dict[str, Any]]]:
"""
List the contents of a directory.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.ls).
<Tip warning={true}>
Note: When possible, use `HfApi.list_repo_tree()` for better performance.
</Tip>
Args:
path (`str`):
Path to the directory.
detail (`bool`, *optional*):
If True, returns a list of dictionaries containing file information. If False,
returns a list of file paths. Defaults to True.
refresh (`bool`, *optional*):
If True, bypass the cache and fetch the latest data. Defaults to False.
revision (`str`, *optional*):
The git revision to list from.
Returns:
`List[Union[str, Dict[str, Any]]]`: List of file paths (if detail=False) or list of file information
dictionaries (if detail=True).
"""
resolved_path = self.resolve_path(path, revision=revision)
path = resolved_path.unresolve()
kwargs = {"expand_info": detail, **kwargs}
try:
out = self._ls_tree(path, refresh=refresh, revision=revision, **kwargs)
except EntryNotFoundError:
# Path could be a file
if not resolved_path.path_in_repo:
_raise_file_not_found(path, None)
out = self._ls_tree(self._parent(path), refresh=refresh, revision=revision, **kwargs)
out = [o for o in out if o["name"] == path]
if len(out) == 0:
_raise_file_not_found(path, None)
return out if detail else [o["name"] for o in out]
def _ls_tree(
self,
path: str,
recursive: bool = False,
refresh: bool = False,
revision: Optional[str] = None,
expand_info: bool = True,
):
resolved_path = self.resolve_path(path, revision=revision)
path = resolved_path.unresolve()
root_path = HfFileSystemResolvedPath(
resolved_path.repo_type,
resolved_path.repo_id,
resolved_path.revision,
path_in_repo="",
_raw_revision=resolved_path._raw_revision,
).unresolve()
out = []
if path in self.dircache and not refresh:
cached_path_infos = self.dircache[path]
out.extend(cached_path_infos)
dirs_not_in_dircache = []
if recursive:
                # Use BFS to traverse the cache and build the "recursive" output
# (The Hub uses a so-called "tree first" strategy for the tree endpoint but we sort the output to follow the spec so the result is (eventually) the same)
dirs_to_visit = deque(
[path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
)
while dirs_to_visit:
dir_info = dirs_to_visit.popleft()
if dir_info["name"] not in self.dircache:
dirs_not_in_dircache.append(dir_info["name"])
else:
cached_path_infos = self.dircache[dir_info["name"]]
out.extend(cached_path_infos)
dirs_to_visit.extend(
[path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
)
dirs_not_expanded = []
if expand_info:
# Check if there are directories with non-expanded entries
dirs_not_expanded = [self._parent(o["name"]) for o in out if o["last_commit"] is None]
if (recursive and dirs_not_in_dircache) or (expand_info and dirs_not_expanded):
# If the dircache is incomplete, find the common path of the missing and non-expanded entries
# and extend the output with the result of `_ls_tree(common_path, recursive=True)`
common_prefix = os.path.commonprefix(dirs_not_in_dircache + dirs_not_expanded)
# Get the parent directory if the common prefix itself is not a directory
common_path = (
common_prefix.rstrip("/")
if common_prefix.endswith("/")
or common_prefix == root_path
or common_prefix in chain(dirs_not_in_dircache, dirs_not_expanded)
else self._parent(common_prefix)
)
out = [o for o in out if not o["name"].startswith(common_path + "/")]
for cached_path in self.dircache:
if cached_path.startswith(common_path + "/"):
self.dircache.pop(cached_path, None)
self.dircache.pop(common_path, None)
out.extend(
self._ls_tree(
common_path,
recursive=recursive,
refresh=True,
revision=revision,
expand_info=expand_info,
)
)
else:
tree = self._api.list_repo_tree(
resolved_path.repo_id,
resolved_path.path_in_repo,
recursive=recursive,
expand=expand_info,
revision=resolved_path.revision,
repo_type=resolved_path.repo_type,
)
for path_info in tree:
if isinstance(path_info, RepoFile):
cache_path_info = {
"name": root_path + "/" + path_info.path,
"size": path_info.size,
"type": "file",
"blob_id": path_info.blob_id,
"lfs": path_info.lfs,
"last_commit": path_info.last_commit,
"security": path_info.security,
}
else:
cache_path_info = {
"name": root_path + "/" + path_info.path,
"size": 0,
"type": "directory",
"tree_id": path_info.tree_id,
"last_commit": path_info.last_commit,
}
parent_path = self._parent(cache_path_info["name"])
self.dircache.setdefault(parent_path, []).append(cache_path_info)
out.append(cache_path_info)
return out
def walk(self, path: str, *args, **kwargs) -> Iterator[Tuple[str, List[str], List[str]]]:
"""
Return all files below the given path.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.walk).
Args:
path (`str`):
Root path to list files from.
Returns:
`Iterator[Tuple[str, List[str], List[str]]]`: An iterator of (path, list of directory names, list of file names) tuples.
"""
        # Set expand_info=False by default to get a ~10x speed boost
kwargs = {"expand_info": kwargs.get("detail", False), **kwargs}
path = self.resolve_path(path, revision=kwargs.get("revision")).unresolve()
yield from super().walk(path, *args, **kwargs)
def glob(self, path: str, **kwargs) -> List[str]:
"""
Find files by glob-matching.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.glob).
Args:
path (`str`):
Path pattern to match.
Returns:
`List[str]`: List of paths matching the pattern.
"""
        # Set expand_info=False by default to get a ~10x speed boost
kwargs = {"expand_info": kwargs.get("detail", False), **kwargs}
path = self.resolve_path(path, revision=kwargs.get("revision")).unresolve()
return super().glob(path, **kwargs)
def find(
self,
path: str,
maxdepth: Optional[int] = None,
withdirs: bool = False,
detail: bool = False,
refresh: bool = False,
revision: Optional[str] = None,
**kwargs,
) -> Union[List[str], Dict[str, Dict[str, Any]]]:
"""
List all files below path.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.find).
Args:
path (`str`):
Root path to list files from.
maxdepth (`int`, *optional*):
Maximum depth to descend into subdirectories.
withdirs (`bool`, *optional*):
Include directory paths in the output. Defaults to False.
detail (`bool`, *optional*):
If True, returns a dict mapping paths to file information. Defaults to False.
refresh (`bool`, *optional*):
If True, bypass the cache and fetch the latest data. Defaults to False.
revision (`str`, *optional*):
The git revision to list from.
Returns:
`Union[List[str], Dict[str, Dict[str, Any]]]`: List of paths or dict of file information.
"""
if maxdepth:
return super().find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, refresh=refresh, revision=revision, **kwargs
)
resolved_path = self.resolve_path(path, revision=revision)
path = resolved_path.unresolve()
kwargs = {"expand_info": detail, **kwargs}
try:
out = self._ls_tree(path, recursive=True, refresh=refresh, revision=resolved_path.revision, **kwargs)
except EntryNotFoundError:
# Path could be a file
if self.info(path, revision=revision, **kwargs)["type"] == "file":
out = {path: {}}
else:
out = {}
else:
if not withdirs:
out = [o for o in out if o["type"] != "directory"]
else:
# If `withdirs=True`, include the directory itself to be consistent with the spec
path_info = self.info(path, revision=resolved_path.revision, **kwargs)
out = [path_info] + out if path_info["type"] == "directory" else out
out = {o["name"]: o for o in out}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names}
def cp_file(self, path1: str, path2: str, revision: Optional[str] = None, **kwargs) -> None:
"""
Copy a file within or between repositories.
<Tip warning={true}>
Note: When possible, use `HfApi.upload_file()` for better performance.
</Tip>
Args:
path1 (`str`):
Source path to copy from.
path2 (`str`):
Destination path to copy to.
revision (`str`, *optional*):
The git revision to copy from.
"""
resolved_path1 = self.resolve_path(path1, revision=revision)
resolved_path2 = self.resolve_path(path2, revision=revision)
same_repo = (
resolved_path1.repo_type == resolved_path2.repo_type and resolved_path1.repo_id == resolved_path2.repo_id
)
if same_repo:
commit_message = f"Copy {path1} to {path2}"
self._api.create_commit(
repo_id=resolved_path1.repo_id,
repo_type=resolved_path1.repo_type,
revision=resolved_path2.revision,
commit_message=kwargs.get("commit_message", commit_message),
commit_description=kwargs.get("commit_description", ""),
operations=[
CommitOperationCopy(
src_path_in_repo=resolved_path1.path_in_repo,
path_in_repo=resolved_path2.path_in_repo,
src_revision=resolved_path1.revision,
)
],
)
else:
with self.open(path1, "rb", revision=resolved_path1.revision) as f:
content = f.read()
commit_message = f"Copy {path1} to {path2}"
self._api.upload_file(
path_or_fileobj=content,
path_in_repo=resolved_path2.path_in_repo,
repo_id=resolved_path2.repo_id,
token=self.token,
repo_type=resolved_path2.repo_type,
revision=resolved_path2.revision,
commit_message=kwargs.get("commit_message", commit_message),
commit_description=kwargs.get("commit_description"),
)
self.invalidate_cache(path=resolved_path1.unresolve())
self.invalidate_cache(path=resolved_path2.unresolve())
def modified(self, path: str, **kwargs) -> datetime:
"""
Get the last modified time of a file.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.modified).
Args:
path (`str`):
Path to the file.
Returns:
`datetime`: Last commit date of the file.
"""
info = self.info(path, **kwargs)
return info["last_commit"]["date"]
def info(self, path: str, refresh: bool = False, revision: Optional[str] = None, **kwargs) -> Dict[str, Any]:
"""
Get information about a file or directory.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.info).
<Tip warning={true}>
Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
</Tip>
Args:
path (`str`):
Path to get info for.
refresh (`bool`, *optional*):
If True, bypass the cache and fetch the latest data. Defaults to False.
revision (`str`, *optional*):
The git revision to get info from.
Returns:
`Dict[str, Any]`: Dictionary containing file information (type, size, commit info, etc.).
"""
resolved_path = self.resolve_path(path, revision=revision)
path = resolved_path.unresolve()
expand_info = kwargs.get(
"expand_info", True
) # don't expose it as a parameter in the public API to follow the spec
if not resolved_path.path_in_repo:
# Path is the root directory
out = {
"name": path,
"size": 0,
"type": "directory",
}
if expand_info:
last_commit = self._api.list_repo_commits(
resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision
)[-1]
out = {
**out,
"tree_id": None, # TODO: tree_id of the root directory?
"last_commit": LastCommitInfo(
oid=last_commit.commit_id, title=last_commit.title, date=last_commit.created_at
),
}
else:
out = None
parent_path = self._parent(path)
if not expand_info and parent_path not in self.dircache:
# Fill the cache with cheap call
self.ls(parent_path, expand_info=False)
if parent_path in self.dircache:
# Check if the path is in the cache
out1 = [o for o in self.dircache[parent_path] if o["name"] == path]
if not out1:
_raise_file_not_found(path, None)
out = out1[0]
if refresh or out is None or (expand_info and out and out["last_commit"] is None):
paths_info = self._api.get_paths_info(
resolved_path.repo_id,
resolved_path.path_in_repo,
expand=expand_info,
revision=resolved_path.revision,
repo_type=resolved_path.repo_type,
)
if not paths_info:
_raise_file_not_found(path, None)
path_info = paths_info[0]
root_path = HfFileSystemResolvedPath(
resolved_path.repo_type,
resolved_path.repo_id,
resolved_path.revision,
path_in_repo="",
_raw_revision=resolved_path._raw_revision,
).unresolve()
if isinstance(path_info, RepoFile):
out = {
"name": root_path + "/" + path_info.path,
"size": path_info.size,
"type": "file",
"blob_id": path_info.blob_id,
"lfs": path_info.lfs,
"last_commit": path_info.last_commit,
"security": path_info.security,
}
else:
out = {
"name": root_path + "/" + path_info.path,
"size": 0,
"type": "directory",
"tree_id": path_info.tree_id,
"last_commit": path_info.last_commit,
}
if not expand_info:
out = {k: out[k] for k in ["name", "size", "type"]}
assert out is not None
return out
def exists(self, path, **kwargs):
"""
Check if a file exists.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists).
<Tip warning={true}>
Note: When possible, use `HfApi.file_exists()` for better performance.
</Tip>
Args:
path (`str`):
Path to check.
Returns:
`bool`: True if file exists, False otherwise.
"""
try:
if kwargs.get("refresh", False):
self.invalidate_cache(path)
self.info(path, **{**kwargs, "expand_info": False})
return True
except: # noqa: E722
return False
def isdir(self, path):
"""
Check if a path is a directory.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.isdir).
Args:
path (`str`):
Path to check.
Returns:
`bool`: True if path is a directory, False otherwise.
"""
try:
return self.info(path, expand_info=False)["type"] == "directory"
except OSError:
return False
def isfile(self, path):
"""
Check if a path is a file.
For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.isfile).
Args:
path (`str`):
Path to check.
Returns:
`bool`: True if path is a file, False otherwise.
"""
try:
return self.info(path, expand_info=False)["type"] == "file"
except: # noqa: E722
return False
def url(self, path: str) -> str:
"""
Get the HTTP URL of the given path.
Args:
path (`str`):
Path to get URL for.
Returns:
`str`: HTTP URL to access the file or directory on the Hub.
"""
resolved_path = self.resolve_path(path)
url = hf_hub_url(
resolved_path.repo_id,
resolved_path.path_in_repo,
repo_type=resolved_path.repo_type,
revision=resolved_path.revision,
endpoint=self.endpoint,
)
if self.isdir(path):
url = url.replace("/resolve/", "/tree/", 1)
return url
def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, outfile=None, **kwargs) -> None:
"""
Copy single remote file to local.
<Tip warning={true}>
Note: When possible, use `HfApi.hf_hub_download()` for better performance.
</Tip>
Args:
rpath (`str`):
Remote path to download from.
lpath (`str`):
Local path to download to.
callback (`Callback`, *optional*):
Optional callback to track download progress. Defaults to no callback.
outfile (`IO`, *optional*):
Optional file-like object to write to. If provided, `lpath` is ignored.
"""
revision = kwargs.get("revision")
unhandled_kwargs = set(kwargs.keys()) - {"revision"}
if not isinstance(callback, (NoOpCallback, TqdmCallback)) or len(unhandled_kwargs) > 0:
# for now, let's not handle custom callbacks
# and let's not handle custom kwargs
return super().get_file(rpath, lpath, callback=callback, outfile=outfile, **kwargs)
# Taken from https://github.com/fsspec/filesystem_spec/blob/47b445ae4c284a82dd15e0287b1ffc410e8fc470/fsspec/spec.py#L883
if isfilelike(lpath):
outfile = lpath
elif self.isdir(rpath):
os.makedirs(lpath, exist_ok=True)
return None
if isinstance(lpath, (str, Path)): # otherwise, let's assume it's a file-like object
os.makedirs(os.path.dirname(lpath), exist_ok=True)
# Open file if not already open
close_file = False
if outfile is None:
outfile = open(lpath, "wb")
close_file = True
initial_pos = outfile.tell()
# Custom implementation of `get_file` to use `http_get`.
resolve_remote_path = self.resolve_path(rpath, revision=revision)
expected_size = self.info(rpath, revision=revision)["size"]
callback.set_size(expected_size)
try:
http_get(
url=hf_hub_url(
repo_id=resolve_remote_path.repo_id,
revision=resolve_remote_path.revision,
filename=resolve_remote_path.path_in_repo,
repo_type=resolve_remote_path.repo_type,
endpoint=self.endpoint,
),
temp_file=outfile,
displayed_filename=rpath,
expected_size=expected_size,
resume_size=0,
headers=self._api._build_hf_headers(),
_tqdm_bar=callback.tqdm if isinstance(callback, TqdmCallback) else None,
)
outfile.seek(initial_pos)
finally:
# Close file only if we opened it ourselves
if close_file:
outfile.close()
@property
def transaction(self):
"""A context within which files are committed together upon exit
Requires the file class to implement `.commit()` and `.discard()`
for the normal and exception cases.
"""
# Taken from https://github.com/fsspec/filesystem_spec/blob/3fbb6fee33b46cccb015607630843dea049d3243/fsspec/spec.py#L231
# See https://github.com/huggingface/huggingface_hub/issues/1733
raise NotImplementedError("Transactional commits are not supported.")
def start_transaction(self):
"""Begin write transaction for deferring files, non-context version"""
# Taken from https://github.com/fsspec/filesystem_spec/blob/3fbb6fee33b46cccb015607630843dea049d3243/fsspec/spec.py#L241
# See https://github.com/huggingface/huggingface_hub/issues/1733
raise NotImplementedError("Transactional commits are not supported.") | class_definition | 2,016 | 39,401 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/hf_file_system.py | null | 116 |
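Putting the path-resolution rules above together, here is a hedged sketch of the path shapes `resolve_path` accepts. The repo and file names are placeholders and must exist on the Hub for the calls to succeed:

```python
from huggingface_hub import HfFileSystem

fs = HfFileSystem()

# Model repo on the default revision
r = fs.resolve_path("username/my-model/config.json")
print(r.repo_type, r.repo_id, r.revision, r.path_in_repo)
# model username/my-model main config.json

# Dataset repo with an explicit revision after '@'
r = fs.resolve_path("datasets/username/my-dataset@main/data/train.csv")
print(r.unresolve())  # datasets/username/my-dataset@main/data/train.csv

# The same revision can be passed as an argument instead of in the path
r = fs.resolve_path("datasets/username/my-dataset/data/train.csv", revision="main")
print(r.repo_id, r.revision, r.path_in_repo)
```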
class HfFileSystemFile(fsspec.spec.AbstractBufferedFile):
def __init__(self, fs: HfFileSystem, path: str, revision: Optional[str] = None, **kwargs):
try:
self.resolved_path = fs.resolve_path(path, revision=revision)
except FileNotFoundError as e:
if "w" in kwargs.get("mode", ""):
raise FileNotFoundError(
f"{e}.\nMake sure the repository and revision exist before writing data."
) from e
raise
# avoid an unnecessary .info() call with expensive expand_info=True to instantiate .details
if kwargs.get("mode", "rb") == "rb":
self.details = fs.info(self.resolved_path.unresolve(), expand_info=False)
super().__init__(fs, self.resolved_path.unresolve(), **kwargs)
self.fs: HfFileSystem
def __del__(self):
if not hasattr(self, "resolved_path"):
# Means that the constructor failed. Nothing to do.
return
return super().__del__()
def _fetch_range(self, start: int, end: int) -> bytes:
headers = {
"range": f"bytes={start}-{end - 1}",
**self.fs._api._build_hf_headers(),
}
url = hf_hub_url(
repo_id=self.resolved_path.repo_id,
revision=self.resolved_path.revision,
filename=self.resolved_path.path_in_repo,
repo_type=self.resolved_path.repo_type,
endpoint=self.fs.endpoint,
)
r = http_backoff(
"GET",
url,
headers=headers,
retry_on_status_codes=(500, 502, 503, 504),
timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
)
hf_raise_for_status(r)
return r.content
def _initiate_upload(self) -> None:
self.temp_file = tempfile.NamedTemporaryFile(prefix="hffs-", delete=False)
def _upload_chunk(self, final: bool = False) -> None:
self.buffer.seek(0)
block = self.buffer.read()
self.temp_file.write(block)
if final:
self.temp_file.close()
self.fs._api.upload_file(
path_or_fileobj=self.temp_file.name,
path_in_repo=self.resolved_path.path_in_repo,
repo_id=self.resolved_path.repo_id,
token=self.fs.token,
repo_type=self.resolved_path.repo_type,
revision=self.resolved_path.revision,
commit_message=self.kwargs.get("commit_message"),
commit_description=self.kwargs.get("commit_description"),
)
os.remove(self.temp_file.name)
self.fs.invalidate_cache(
path=self.resolved_path.unresolve(),
)
def read(self, length=-1):
"""Read remote file.
If `length` is not provided or is -1, the entire file is downloaded and read. On POSIX systems and if
`hf_transfer` is not enabled, the file is loaded in memory directly. Otherwise, the file is downloaded to a
temporary file and read from there.
"""
if self.mode == "rb" and (length is None or length == -1) and self.loc == 0:
with self.fs.open(self.path, "rb", block_size=0) as f: # block_size=0 enables fast streaming
return f.read()
return super().read(length)
def url(self) -> str:
return self.fs.url(self.path) | class_definition | 39,404 | 42,816 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/hf_file_system.py | null | 117 |
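A minimal sketch of how random-access reads on such a file translate to ranged HTTP requests; the repo and file names are placeholders:

```python
from huggingface_hub import HfFileSystem

fs = HfFileSystem()
with fs.open("username/my-model/model.safetensors", "rb") as f:
    # safetensors files start with an 8-byte little-endian header length
    header_size = int.from_bytes(f.read(8), "little")  # small ranged GET for the first bytes
    f.seek(1024)
    chunk = f.read(256)  # another ranged GET starting at byte 1024
```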
class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):
def __init__(
self,
fs: HfFileSystem,
path: str,
mode: str = "rb",
revision: Optional[str] = None,
block_size: int = 0,
cache_type: str = "none",
**kwargs,
):
if block_size != 0:
raise ValueError(f"HfFileSystemStreamFile only supports block_size=0 but got {block_size}")
if cache_type != "none":
raise ValueError(f"HfFileSystemStreamFile only supports cache_type='none' but got {cache_type}")
if "w" in mode:
raise ValueError(f"HfFileSystemStreamFile only supports reading but got mode='{mode}'")
try:
self.resolved_path = fs.resolve_path(path, revision=revision)
except FileNotFoundError as e:
if "w" in kwargs.get("mode", ""):
raise FileNotFoundError(
f"{e}.\nMake sure the repository and revision exist before writing data."
) from e
# avoid an unnecessary .info() call to instantiate .details
self.details = {"name": self.resolved_path.unresolve(), "size": None}
super().__init__(
fs, self.resolved_path.unresolve(), mode=mode, block_size=block_size, cache_type=cache_type, **kwargs
)
self.response: Optional[Response] = None
self.fs: HfFileSystem
def seek(self, loc: int, whence: int = 0):
if loc == 0 and whence == 1:
return
if loc == self.loc and whence == 0:
return
raise ValueError("Cannot seek streaming HF file")
def read(self, length: int = -1):
read_args = (length,) if length >= 0 else ()
if self.response is None or self.response.raw.isclosed():
url = hf_hub_url(
repo_id=self.resolved_path.repo_id,
revision=self.resolved_path.revision,
filename=self.resolved_path.path_in_repo,
repo_type=self.resolved_path.repo_type,
endpoint=self.fs.endpoint,
)
self.response = http_backoff(
"GET",
url,
headers=self.fs._api._build_hf_headers(),
retry_on_status_codes=(500, 502, 503, 504),
stream=True,
timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
)
hf_raise_for_status(self.response)
try:
out = self.response.raw.read(*read_args)
except Exception:
self.response.close()
# Retry by recreating the connection
url = hf_hub_url(
repo_id=self.resolved_path.repo_id,
revision=self.resolved_path.revision,
filename=self.resolved_path.path_in_repo,
repo_type=self.resolved_path.repo_type,
endpoint=self.fs.endpoint,
)
self.response = http_backoff(
"GET",
url,
headers={"Range": "bytes=%d-" % self.loc, **self.fs._api._build_hf_headers()},
retry_on_status_codes=(500, 502, 503, 504),
stream=True,
timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
)
hf_raise_for_status(self.response)
try:
out = self.response.raw.read(*read_args)
except Exception:
self.response.close()
raise
self.loc += len(out)
return out
def url(self) -> str:
return self.fs.url(self.path)
def __del__(self):
if not hasattr(self, "resolved_path"):
# Means that the constructor failed. Nothing to do.
return
return super().__del__()
def __reduce__(self):
return reopen, (self.fs, self.path, self.mode, self.blocksize, self.cache.name) | class_definition | 42,819 | 46,713 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/hf_file_system.py | null | 118 |
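By contrast with the buffered file above, passing `block_size=0` to `open()` returns this streaming variant, which keeps a single HTTP response open and only supports forward reads. A hedged sketch with placeholder names:

```python
from huggingface_hub import HfFileSystem

fs = HfFileSystem()
with fs.open("username/my-model/model.safetensors", "rb", block_size=0) as f:
    while True:
        chunk = f.read(8 * 1024 * 1024)  # read sequentially, 8 MB at a time
        if not chunk:
            break
        # ... process chunk ...
```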
class CommitOperationDelete:
"""
Data structure holding necessary info to delete a file or a folder from a repository
on the Hub.
Args:
path_in_repo (`str`):
Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
for a file or `"checkpoints/1fec34a/"` for a folder.
        is_folder (`bool` or `Literal["auto"]`, *optional*):
Whether the Delete Operation applies to a folder or not. If "auto", the path
type (file or folder) is guessed automatically by looking if path ends with
a "/" (folder) or not (file). To explicitly set the path type, you can set
`is_folder=True` or `is_folder=False`.
"""
path_in_repo: str
is_folder: Union[bool, Literal["auto"]] = "auto"
def __post_init__(self):
self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
if self.is_folder == "auto":
self.is_folder = self.path_in_repo.endswith("/")
if not isinstance(self.is_folder, bool):
raise ValueError(
f"Wrong value for `is_folder`. Must be one of [`True`, `False`, `'auto'`]. Got '{self.is_folder}'."
) | class_definition | 1,307 | 2,513 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_api.py | null | 119 |
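A small illustration of the "auto" path-type detection described above (assuming the `@dataclass` decorator that sits outside this extract):

```python
from huggingface_hub import CommitOperationDelete

print(CommitOperationDelete(path_in_repo="checkpoints/1fec34a/weights.bin").is_folder)  # False (no trailing slash)
print(CommitOperationDelete(path_in_repo="checkpoints/1fec34a/").is_folder)             # True (trailing slash)
print(CommitOperationDelete(path_in_repo="old-checkpoints", is_folder=True).is_folder)  # True (explicit)
```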
class CommitOperationCopy:
"""
Data structure holding necessary info to copy a file in a repository on the Hub.
Limitations:
- Only LFS files can be copied. To copy a regular file, you need to download it locally and re-upload it
- Cross-repository copies are not supported.
Note: you can combine a [`CommitOperationCopy`] and a [`CommitOperationDelete`] to rename an LFS file on the Hub.
Args:
src_path_in_repo (`str`):
Relative filepath in the repo of the file to be copied, e.g. `"checkpoints/1fec34a/weights.bin"`.
path_in_repo (`str`):
Relative filepath in the repo where to copy the file, e.g. `"checkpoints/1fec34a/weights_copy.bin"`.
src_revision (`str`, *optional*):
The git revision of the file to be copied. Can be any valid git revision.
Default to the target commit revision.
"""
src_path_in_repo: str
path_in_repo: str
src_revision: Optional[str] = None
def __post_init__(self):
self.src_path_in_repo = _validate_path_in_repo(self.src_path_in_repo)
self.path_in_repo = _validate_path_in_repo(self.path_in_repo) | class_definition | 2,527 | 3,696 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_api.py | null | 120 |
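The rename pattern mentioned in the note above, a copy plus a delete in the same commit, could look like the following sketch, assuming write access to a hypothetical `username/my-model` repo:

```python
from huggingface_hub import CommitOperationCopy, CommitOperationDelete, HfApi

api = HfApi()
api.create_commit(
    repo_id="username/my-model",
    operations=[
        # Copy the LFS file to its new name...
        CommitOperationCopy(src_path_in_repo="weights.bin", path_in_repo="weights-v2.bin"),
        # ...then delete the old one, all in a single commit.
        CommitOperationDelete(path_in_repo="weights.bin"),
    ],
    commit_message="Rename weights.bin to weights-v2.bin",
)
```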
class CommitOperationAdd:
"""
Data structure holding necessary info to upload a file to a repository on the Hub.
Args:
path_in_repo (`str`):
Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
path_or_fileobj (`str`, `Path`, `bytes`, or `BinaryIO`):
Either:
- a path to a local file (as `str` or `pathlib.Path`) to upload
- a buffer of bytes (`bytes`) holding the content of the file to upload
- a "file object" (subclass of `io.BufferedIOBase`), typically obtained
with `open(path, "rb")`. It must support `seek()` and `tell()` methods.
Raises:
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If `path_or_fileobj` is not one of `str`, `Path`, `bytes` or `io.BufferedIOBase`.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If `path_or_fileobj` is a `str` or `Path` but not a path to an existing file.
[`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
If `path_or_fileobj` is a `io.BufferedIOBase` but it doesn't support both
`seek()` and `tell()`.
"""
path_in_repo: str
path_or_fileobj: Union[str, Path, bytes, BinaryIO]
upload_info: UploadInfo = field(init=False, repr=False)
# Internal attributes
# set to "lfs" or "regular" once known
_upload_mode: Optional[UploadMode] = field(init=False, repr=False, default=None)
# set to True if .gitignore rules prevent the file from being uploaded as LFS
# (server-side check)
_should_ignore: Optional[bool] = field(init=False, repr=False, default=None)
# set to the remote OID of the file if it has already been uploaded
# useful to determine if a commit will be empty or not
_remote_oid: Optional[str] = field(init=False, repr=False, default=None)
# set to True once the file has been uploaded as LFS
_is_uploaded: bool = field(init=False, repr=False, default=False)
# set to True once the file has been committed
_is_committed: bool = field(init=False, repr=False, default=False)
def __post_init__(self) -> None:
"""Validates `path_or_fileobj` and compute `upload_info`."""
self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
# Validate `path_or_fileobj` value
if isinstance(self.path_or_fileobj, Path):
self.path_or_fileobj = str(self.path_or_fileobj)
if isinstance(self.path_or_fileobj, str):
path_or_fileobj = os.path.normpath(os.path.expanduser(self.path_or_fileobj))
if not os.path.isfile(path_or_fileobj):
raise ValueError(f"Provided path: '{path_or_fileobj}' is not a file on the local file system")
elif not isinstance(self.path_or_fileobj, (io.BufferedIOBase, bytes)):
# ^^ Inspired from: https://stackoverflow.com/questions/44584829/how-to-determine-if-file-is-opened-in-binary-or-text-mode
raise ValueError(
"path_or_fileobj must be either an instance of str, bytes or"
" io.BufferedIOBase. If you passed a file-like object, make sure it is"
" in binary mode."
)
if isinstance(self.path_or_fileobj, io.BufferedIOBase):
try:
self.path_or_fileobj.tell()
self.path_or_fileobj.seek(0, os.SEEK_CUR)
except (OSError, AttributeError) as exc:
raise ValueError(
"path_or_fileobj is a file-like object but does not implement seek() and tell()"
) from exc
# Compute "upload_info" attribute
if isinstance(self.path_or_fileobj, str):
self.upload_info = UploadInfo.from_path(self.path_or_fileobj)
elif isinstance(self.path_or_fileobj, bytes):
self.upload_info = UploadInfo.from_bytes(self.path_or_fileobj)
else:
self.upload_info = UploadInfo.from_fileobj(self.path_or_fileobj)
@contextmanager
def as_file(self, with_tqdm: bool = False) -> Iterator[BinaryIO]:
"""
        A context manager that yields a file-like object for reading the underlying
data behind `path_or_fileobj`.
Args:
with_tqdm (`bool`, *optional*, defaults to `False`):
If True, iterating over the file object will display a progress bar. Only
works if the file-like object is a path to a file. Pure bytes and buffers
are not supported.
Example:
```python
>>> operation = CommitOperationAdd(
... path_in_repo="remote/dir/weights.h5",
... path_or_fileobj="./local/weights.h5",
... )
CommitOperationAdd(path_in_repo='remote/dir/weights.h5', path_or_fileobj='./local/weights.h5')
>>> with operation.as_file() as file:
... content = file.read()
>>> with operation.as_file(with_tqdm=True) as file:
... while True:
... data = file.read(1024)
... if not data:
... break
config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
>>> with operation.as_file(with_tqdm=True) as file:
... requests.put(..., data=file)
config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
```
"""
if isinstance(self.path_or_fileobj, str) or isinstance(self.path_or_fileobj, Path):
if with_tqdm:
with tqdm_stream_file(self.path_or_fileobj) as file:
yield file
else:
with open(self.path_or_fileobj, "rb") as file:
yield file
elif isinstance(self.path_or_fileobj, bytes):
yield io.BytesIO(self.path_or_fileobj)
elif isinstance(self.path_or_fileobj, io.BufferedIOBase):
prev_pos = self.path_or_fileobj.tell()
yield self.path_or_fileobj
self.path_or_fileobj.seek(prev_pos, io.SEEK_SET)
def b64content(self) -> bytes:
"""
The base64-encoded content of `path_or_fileobj`
Returns: `bytes`
"""
with self.as_file() as file:
return base64.b64encode(file.read())
@property
def _local_oid(self) -> Optional[str]:
"""Return the OID of the local file.
This OID is then compared to `self._remote_oid` to check if the file has changed compared to the remote one.
If the file did not change, we won't upload it again to prevent empty commits.
        For LFS files, the OID corresponds to the SHA256 of the file content (used as the LFS ref).
For regular files, the OID corresponds to the SHA1 of the file content.
Note: this is slightly different to git OID computation since the oid of an LFS file is usually the git-SHA1 of the
pointer file content (not the actual file content). However, using the SHA256 is enough to detect changes
and more convenient client-side.
"""
if self._upload_mode is None:
return None
elif self._upload_mode == "lfs":
return self.upload_info.sha256.hex()
else:
# Regular file => compute sha1
# => no need to read by chunk since the file is guaranteed to be <=5MB.
with self.as_file() as file:
return sha.git_hash(file.read()) | class_definition | 3,710 | 11,361 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/_commit_api.py | null | 121 |
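A minimal sketch of the three accepted `path_or_fileobj` flavors combined into one commit; the local file, the repo id and the write access are assumptions:

```python
import io
from huggingface_hub import CommitOperationAdd, HfApi

operations = [
    CommitOperationAdd(path_in_repo="weights.bin", path_or_fileobj="./weights.bin"),        # local path
    CommitOperationAdd(path_in_repo="notes.txt", path_or_fileobj=b"trained for 3 epochs"),  # raw bytes
    CommitOperationAdd(path_in_repo="log.txt", path_or_fileobj=io.BytesIO(b"step=100")),    # binary buffer
]
HfApi().create_commit(
    repo_id="username/my-model",
    operations=operations,
    commit_message="Upload weights and notes",
)
```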
class StateDictSplit:
is_sharded: bool = field(init=False)
metadata: Dict[str, Any]
filename_to_tensors: Dict[str, List[str]]
tensor_to_filename: Dict[str, str]
def __post_init__(self):
self.is_sharded = len(self.filename_to_tensors) > 1 | class_definition | 1,083 | 1,349 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/serialization/_base.py | null | 122 |
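For illustration, `is_sharded` is derived in `__post_init__` rather than passed in; a tiny sketch assuming the `@dataclass` decorator outside this extract and importing from the module shown in the file path above:

```python
from huggingface_hub.serialization._base import StateDictSplit

split = StateDictSplit(
    metadata={},
    filename_to_tensors={"model.safetensors": ["weight", "bias"]},
    tensor_to_filename={"weight": "model.safetensors", "bias": "model.safetensors"},
)
print(split.is_sharded)  # False: a single output file means no sharding
```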
class DDUFEntry:
"""Object representing a file entry in a DDUF file.
See [`read_dduf_file`] for how to read a DDUF file.
Attributes:
filename (str):
The name of the file in the DDUF archive.
offset (int):
The offset of the file in the DDUF archive.
length (int):
The length of the file in the DDUF archive.
dduf_path (str):
The path to the DDUF archive (for internal use).
"""
filename: str
length: int
offset: int
dduf_path: Path = field(repr=False)
@contextmanager
def as_mmap(self) -> Generator[bytes, None, None]:
"""Open the file as a memory-mapped file.
Useful to load safetensors directly from the file.
Example:
```py
>>> import safetensors.torch
>>> with entry.as_mmap() as mm:
... tensors = safetensors.torch.load(mm)
```
"""
with self.dduf_path.open("rb") as f:
with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mm:
yield mm[self.offset : self.offset + self.length]
def read_text(self, encoding: str = "utf-8") -> str:
"""Read the file as text.
Useful for '.txt' and '.json' entries.
Example:
```py
>>> import json
>>> index = json.loads(entry.read_text())
```
"""
with self.dduf_path.open("rb") as f:
f.seek(self.offset)
return f.read(self.length).decode(encoding=encoding) | class_definition | 725 | 2,303 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/serialization/_dduf.py | null | 123 |
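A hedged sketch of how such entries are obtained and consumed via `read_dduf_file` (referenced in the docstring above); the archive name and entry paths are placeholders:

```python
import json
from huggingface_hub import read_dduf_file

entries = read_dduf_file("my-pipeline.dduf")  # Dict[str, DDUFEntry]
index = json.loads(entries["model_index.json"].read_text())
with entries["vae/diffusion_pytorch_model.safetensors"].as_mmap() as mm:
    print(len(mm))  # zero-copy view on the entry's bytes
```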
class _IncompatibleKeys(namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"])):
"""
This is used to report missing and unexpected keys in the state dict.
Taken from https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/module.py#L52.
"""
def __repr__(self) -> str:
if not self.missing_keys and not self.unexpected_keys:
return "<All keys matched successfully>"
return super().__repr__()
__str__ = __repr__ | class_definition | 44,234 | 44,716 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/serialization/_torch.py | null | 124 |
class UniqueRequestIdAdapter(HTTPAdapter):
X_AMZN_TRACE_ID = "X-Amzn-Trace-Id"
def add_headers(self, request, **kwargs):
super().add_headers(request, **kwargs)
# Add random request ID => easier for server-side debug
if X_AMZN_TRACE_ID not in request.headers:
request.headers[X_AMZN_TRACE_ID] = request.headers.get(X_REQUEST_ID) or str(uuid.uuid4())
# Add debug log
has_token = str(request.headers.get("authorization", "")).startswith("Bearer hf_")
logger.debug(
f"Request {request.headers[X_AMZN_TRACE_ID]}: {request.method} {request.url} (authenticated: {has_token})"
)
def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
"""Catch any RequestException to append request id to the error message for debugging."""
try:
return super().send(request, *args, **kwargs)
except requests.RequestException as e:
request_id = request.headers.get(X_AMZN_TRACE_ID)
if request_id is not None:
# Taken from https://stackoverflow.com/a/58270258
e.args = (*e.args, f"(Request ID: {request_id})")
raise | class_definition | 2,057 | 3,261 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_http.py | null | 125 |
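A minimal sketch of how such an adapter is mounted on a `requests` session (huggingface_hub wires this up internally when it builds its own session); the module path follows the file location above:

```python
import requests
from huggingface_hub.utils._http import UniqueRequestIdAdapter

session = requests.Session()
adapter = UniqueRequestIdAdapter()
session.mount("https://", adapter)
session.mount("http://", adapter)
# Every outgoing request now carries an "X-Amzn-Trace-Id" header for easier server-side debugging.
```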
class OfflineAdapter(HTTPAdapter):
def send(self, request: PreparedRequest, *args, **kwargs) -> Response:
raise OfflineModeIsEnabled(
f"Cannot reach {request.url}: offline mode is enabled. To disable it, please unset the `HF_HUB_OFFLINE` environment variable."
) | class_definition | 3,264 | 3,558 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_http.py | null | 126 |
class tqdm(old_tqdm):
"""
Class to override `disable` argument in case progress bars are globally disabled.
Taken from https://github.com/tqdm/tqdm/issues/619#issuecomment-619639324.
"""
def __init__(self, *args, **kwargs):
name = kwargs.pop("name", None) # do not pass `name` to `tqdm`
if are_progress_bars_disabled(name):
kwargs["disable"] = True
super().__init__(*args, **kwargs)
def __delattr__(self, attr: str) -> None:
"""Fix for https://github.com/huggingface/huggingface_hub/issues/1603"""
try:
super().__delattr__(attr)
except AttributeError:
if attr != "_lock":
raise | class_definition | 7,617 | 8,322 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/tqdm.py | null | 127 |
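The global switch this subclass respects lives next to it in `huggingface_hub.utils`; a short sketch:

```python
from huggingface_hub.utils import disable_progress_bars, enable_progress_bars, tqdm

disable_progress_bars()   # every bar created below is instantiated with disable=True
for _ in tqdm(range(3)):
    pass                  # nothing is rendered
enable_progress_bars()    # restore the default behavior
```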
class HfFolder:
path_token = Path(constants.HF_TOKEN_PATH)
# Private attribute. Will be removed in v0.15
_old_path_token = Path(constants._OLD_HF_TOKEN_PATH)
# TODO: deprecate when adapted in transformers/datasets/gradio
# @_deprecate_method(version="1.0", message="Use `huggingface_hub.login` instead.")
@classmethod
def save_token(cls, token: str) -> None:
"""
Save token, creating folder as needed.
Token is saved in the huggingface home folder. You can configure it by setting
the `HF_HOME` environment variable.
Args:
token (`str`):
The token to save to the [`HfFolder`]
"""
cls.path_token.parent.mkdir(parents=True, exist_ok=True)
cls.path_token.write_text(token)
# TODO: deprecate when adapted in transformers/datasets/gradio
# @_deprecate_method(version="1.0", message="Use `huggingface_hub.get_token` instead.")
@classmethod
def get_token(cls) -> Optional[str]:
"""
        Get the token, or None if it does not exist.
This method is deprecated in favor of [`huggingface_hub.get_token`] but is kept for backward compatibility.
Its behavior is the same as [`huggingface_hub.get_token`].
Returns:
`str` or `None`: The token, `None` if it doesn't exist.
"""
# 0. Check if token exist in old path but not new location
try:
cls._copy_to_new_path_and_warn()
except Exception: # if not possible (e.g. PermissionError), do not raise
pass
return get_token()
# TODO: deprecate when adapted in transformers/datasets/gradio
# @_deprecate_method(version="1.0", message="Use `huggingface_hub.logout` instead.")
@classmethod
def delete_token(cls) -> None:
"""
Deletes the token from storage. Does not fail if token does not exist.
"""
try:
cls.path_token.unlink()
except FileNotFoundError:
pass
try:
cls._old_path_token.unlink()
except FileNotFoundError:
pass
@classmethod
def _copy_to_new_path_and_warn(cls):
if cls._old_path_token.exists() and not cls.path_token.exists():
cls.save_token(cls._old_path_token.read_text())
warnings.warn(
f"A token has been found in `{cls._old_path_token}`. This is the old"
" path where tokens were stored. The new location is"
f" `{cls.path_token}` which is configurable using `HF_HOME` environment"
" variable. Your token has been copied to this new location. You can"
" now safely delete the old token file manually or use"
" `huggingface-cli logout`."
) | class_definition | 813 | 3,611 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_hf_folder.py | null | 128 |
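A short usage sketch; as the docstrings above note, `huggingface_hub.login`, `get_token` and `logout` are the recommended entry points for new code, with `HfFolder` kept for backward compatibility. The token value is a placeholder and saving it overwrites any token stored under `HF_HOME`:

```python
from huggingface_hub import HfFolder

HfFolder.save_token("hf_xxx")            # writes the token file under HF_HOME
print(HfFolder.get_token() is not None)  # True
HfFolder.delete_token()                  # silently ignores a missing token file
```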
class SliceFileObj(AbstractContextManager):
"""
Utility context manager to read a *slice* of a seekable file-like object as a seekable, file-like object.
This is NOT thread safe
Inspired by stackoverflow.com/a/29838711/593036
Credits to @julien-c
Args:
fileobj (`BinaryIO`):
A file-like object to slice. MUST implement `tell()` and `seek()` (and `read()` of course).
`fileobj` will be reset to its original position when exiting the context manager.
seek_from (`int`):
The start of the slice (offset from position 0 in bytes).
read_limit (`int`):
The maximum number of bytes to read from the slice.
Attributes:
previous_position (`int`):
The previous position
Examples:
    Reading 200 bytes with an offset of 128 bytes from a file (i.e. bytes 128 to 327):
```python
>>> with open("path/to/file", "rb") as file:
... with SliceFileObj(file, seek_from=128, read_limit=200) as fslice:
... fslice.read(...)
```
Reading a file in chunks of 512 bytes
```python
    >>> import os
    >>> from math import ceil
    >>> chunk_size = 512
    >>> file_size = os.path.getsize("path/to/file")
>>> with open("path/to/file", "rb") as file:
... for chunk_idx in range(ceil(file_size / chunk_size)):
... with SliceFileObj(file, seek_from=chunk_idx * chunk_size, read_limit=chunk_size) as fslice:
... chunk = fslice.read(...)
```
"""
def __init__(self, fileobj: BinaryIO, seek_from: int, read_limit: int):
self.fileobj = fileobj
self.seek_from = seek_from
self.read_limit = read_limit
def __enter__(self):
self._previous_position = self.fileobj.tell()
end_of_stream = self.fileobj.seek(0, os.SEEK_END)
self._len = min(self.read_limit, end_of_stream - self.seek_from)
# ^^ The actual number of bytes that can be read from the slice
self.fileobj.seek(self.seek_from, io.SEEK_SET)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.fileobj.seek(self._previous_position, io.SEEK_SET)
def read(self, n: int = -1):
pos = self.tell()
if pos >= self._len:
return b""
remaining_amount = self._len - pos
data = self.fileobj.read(remaining_amount if n < 0 else min(n, remaining_amount))
return data
def tell(self) -> int:
return self.fileobj.tell() - self.seek_from
def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
start = self.seek_from
end = start + self._len
if whence in (os.SEEK_SET, os.SEEK_END):
offset = start + offset if whence == os.SEEK_SET else end + offset
offset = max(start, min(offset, end))
whence = os.SEEK_SET
elif whence == os.SEEK_CUR:
cur_pos = self.fileobj.tell()
offset = max(start - cur_pos, min(offset, end - cur_pos))
else:
raise ValueError(f"whence value {whence} is not supported")
return self.fileobj.seek(offset, whence) - self.seek_from
def __iter__(self):
yield self.read(n=4 * 1024 * 1024) | class_definition | 743 | 3,956 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_lfs.py | null | 129 |
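A self-contained sketch using an in-memory buffer as a stand-in for a real file, importing from the module shown in the file path above:

```python
import io
from huggingface_hub.utils._lfs import SliceFileObj

buffer = io.BytesIO(b"0123456789" * 10)  # 100 bytes
with SliceFileObj(buffer, seek_from=20, read_limit=30) as fslice:
    print(fslice.read())   # bytes 20..49 of the underlying buffer
    fslice.seek(0)
    print(fslice.read(5))  # b'01234' again, relative to the slice start
print(buffer.tell())       # 0: the original position is restored on exit
```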
class TensorInfo:
"""Information about a tensor.
For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.
Attributes:
dtype (`str`):
The data type of the tensor ("F64", "F32", "F16", "BF16", "I64", "I32", "I16", "I8", "U8", "BOOL").
shape (`List[int]`):
The shape of the tensor.
data_offsets (`Tuple[int, int]`):
The offsets of the data in the file as a tuple `[BEGIN, END]`.
parameter_count (`int`):
The number of parameters in the tensor.
"""
dtype: DTYPE_T
shape: List[int]
data_offsets: Tuple[int, int]
parameter_count: int = field(init=False)
def __post_init__(self) -> None:
# Taken from https://stackoverflow.com/a/13840436
try:
self.parameter_count = functools.reduce(operator.mul, self.shape)
except TypeError:
self.parameter_count = 1 # scalar value has no shape | class_definition | 306 | 1,305 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_safetensors.py | null | 130 |
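A tiny illustration of the derived `parameter_count`, assuming the `@dataclass` decorator that sits outside this extract and importing from the module shown in the file path above:

```python
from huggingface_hub.utils._safetensors import TensorInfo

info = TensorInfo(dtype="F32", shape=[1024, 768], data_offsets=(0, 3145728))
print(info.parameter_count)  # 786432 == 1024 * 768
```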
class SafetensorsFileMetadata:
"""Metadata for a Safetensors file hosted on the Hub.
This class is returned by [`parse_safetensors_file_metadata`].
For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.
Attributes:
metadata (`Dict`):
The metadata contained in the file.
tensors (`Dict[str, TensorInfo]`):
A map of all tensors. Keys are tensor names and values are information about the corresponding tensor, as a
[`TensorInfo`] object.
parameter_count (`Dict[str, int]`):
A map of the number of parameters per data type. Keys are data types and values are the number of parameters
of that data type.
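Example:
An illustrative sketch showing how `parameter_count` is aggregated per data type; in practice this object is returned by [`parse_safetensors_file_metadata`], and the private module path below is used for illustration only:
```py
>>> from huggingface_hub.utils._safetensors import SafetensorsFileMetadata, TensorInfo
>>> file_metadata = SafetensorsFileMetadata(
...     metadata={"format": "pt"},
...     tensors={
...         "embed.weight": TensorInfo(dtype="F32", shape=[10, 8], data_offsets=(0, 320)),
...         "head.bias": TensorInfo(dtype="F32", shape=[10], data_offsets=(320, 360)),
...     },
... )
>>> file_metadata.parameter_count
{'F32': 90}
```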
"""
metadata: Dict[str, str]
tensors: Dict[TENSOR_NAME_T, TensorInfo]
parameter_count: Dict[DTYPE_T, int] = field(init=False)
def __post_init__(self) -> None:
parameter_count: Dict[DTYPE_T, int] = defaultdict(int)
for tensor in self.tensors.values():
parameter_count[tensor.dtype] += tensor.parameter_count
self.parameter_count = dict(parameter_count) | class_definition | 1,319 | 2,491 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_safetensors.py | null | 131 |
class SafetensorsRepoMetadata:
"""Metadata for a Safetensors repo.
A repo is considered to be a Safetensors repo if it contains either a 'model.safetensors' weight file (non-sharded
model) or a 'model.safetensors.index.json' index file (sharded model) at its root.
This class is returned by [`get_safetensors_metadata`].
For more details regarding the safetensors format, check out https://huggingface.co/docs/safetensors/index#format.
Attributes:
metadata (`Dict`, *optional*):
The metadata contained in the 'model.safetensors.index.json' file, if it exists. Only populated for sharded
models.
sharded (`bool`):
Whether the repo contains a sharded model or not.
weight_map (`Dict[str, str]`):
A map of all weights. Keys are tensor names and values are filenames of the files containing the tensors.
files_metadata (`Dict[str, SafetensorsFileMetadata]`):
A map of all files metadata. Keys are filenames and values are the metadata of the corresponding file, as
a [`SafetensorsFileMetadata`] object.
parameter_count (`Dict[str, int]`):
A map of the number of parameters per data type. Keys are data types and values are the number of parameters
of that data type.
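Example:
An illustrative sketch using [`get_safetensors_metadata`]; the exact output depends on the repo and is abridged here:
```py
>>> from huggingface_hub import get_safetensors_metadata
>>> metadata = get_safetensors_metadata("bigscience/bloomz-560m")
>>> metadata.sharded
False
>>> metadata.parameter_count  # e.g. {'F16': 559214592}; the exact value depends on the checkpoint
```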
"""
metadata: Optional[Dict]
sharded: bool
weight_map: Dict[TENSOR_NAME_T, FILENAME_T] # tensor name -> filename
files_metadata: Dict[FILENAME_T, SafetensorsFileMetadata] # filename -> metadata
parameter_count: Dict[DTYPE_T, int] = field(init=False)
def __post_init__(self) -> None:
parameter_count: Dict[DTYPE_T, int] = defaultdict(int)
for file_metadata in self.files_metadata.values():
for dtype, nb_parameters_ in file_metadata.parameter_count.items():
parameter_count[dtype] += nb_parameters_
self.parameter_count = dict(parameter_count) | class_definition | 2,505 | 4,457 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_safetensors.py | null | 132 |
class CachedFileInfo:
"""Frozen data structure holding information about a single cached file.
Args:
file_name (`str`):
Name of the file. Example: `config.json`.
file_path (`Path`):
Path of the file in the `snapshots` directory. The file path is a symlink
referring to a blob in the `blobs` folder.
blob_path (`Path`):
Path of the blob file. This is equivalent to `file_path.resolve()`.
size_on_disk (`int`):
Size of the blob file in bytes.
blob_last_accessed (`float`):
Timestamp of the last time the blob file has been accessed (from any
revision).
blob_last_modified (`float`):
Timestamp of the last time the blob file has been modified/created.
<Tip warning={true}>
`blob_last_accessed` and `blob_last_modified` reliability can depend on the OS you
are using. See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
for more details.
</Tip>
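Example:
An illustrative sketch; [`CachedFileInfo`] objects are not built by hand but retrieved by traversing the output of [`scan_cache_dir`]:
```py
>>> from huggingface_hub import scan_cache_dir
>>> for repo in scan_cache_dir().repos:
...     for revision in repo.revisions:
...         for file in revision.files:
...             print(file.file_name, file.size_on_disk_str, file.blob_path)
```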
"""
file_name: str
file_path: Path
blob_path: Path
size_on_disk: int
blob_last_accessed: float
blob_last_modified: float
@property
def blob_last_accessed_str(self) -> str:
"""
(property) Timestamp of the last time the blob file has been accessed (from any
revision), returned as a human-readable string.
Example: "2 weeks ago".
"""
return _format_timesince(self.blob_last_accessed)
@property
def blob_last_modified_str(self) -> str:
"""
(property) Timestamp of the last time the blob file has been modified, returned
as a human-readable string.
Example: "2 weeks ago".
"""
return _format_timesince(self.blob_last_modified)
@property
def size_on_disk_str(self) -> str:
"""
(property) Size of the blob file as a human-readable string.
Example: "42.2K".
"""
return _format_size(self.size_on_disk) | class_definition | 1,264 | 3,303 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_cache_manager.py | null | 133 |
class CachedRevisionInfo:
"""Frozen data structure holding information about a revision.
A revision corresponds to a folder in the `snapshots` folder and is populated with the exact tree structure as
the repo on the Hub, except that it contains only symlinks. A revision can either be referenced by 1 or more
`refs` or be "detached" (no refs).
Args:
commit_hash (`str`):
Hash of the revision (unique).
Example: `"9338f7b671827df886678df2bdd7cc7b4f36dffd"`.
snapshot_path (`Path`):
Path to the revision directory in the `snapshots` folder. It contains the
exact tree structure as the repo on the Hub.
files (`FrozenSet[CachedFileInfo]`):
Set of [`~CachedFileInfo`] describing all files contained in the snapshot.
refs (`FrozenSet[str]`):
Set of `refs` pointing to this revision. If the revision has no `refs`, it
is considered detached.
Example: `{"main", "2.4.0"}` or `{"refs/pr/1"}`.
size_on_disk (`int`):
Sum of the blob file sizes that are symlink-ed by the revision.
last_modified (`float`):
Timestamp of the last time the revision has been created/modified.
<Tip warning={true}>
`last_accessed` cannot be determined correctly on a single revision as blob files
are shared across revisions.
</Tip>
<Tip warning={true}>
`size_on_disk` is not necessarily the sum of all file sizes because of possible
duplicated files. Besides, only blobs are taken into account, not the (negligible)
size of folders and symlinks.
</Tip>
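Example:
An illustrative sketch listing detached revisions (i.e. revisions not referenced by any `refs`) from a cache scan:
```py
>>> from huggingface_hub import scan_cache_dir
>>> for repo in scan_cache_dir().repos:
...     for revision in repo.revisions:
...         if not revision.refs:  # detached revision
...             print(repo.repo_id, revision.commit_hash, revision.size_on_disk_str)
```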
"""
commit_hash: str
snapshot_path: Path
size_on_disk: int
files: FrozenSet[CachedFileInfo]
refs: FrozenSet[str]
last_modified: float
@property
def last_modified_str(self) -> str:
"""
(property) Timestamp of the last time the revision has been modified, returned
as a human-readable string.
Example: "2 weeks ago".
"""
return _format_timesince(self.last_modified)
@property
def size_on_disk_str(self) -> str:
"""
(property) Sum of the blob file sizes as a human-readable string.
Example: "42.2K".
"""
return _format_size(self.size_on_disk)
@property
def nb_files(self) -> int:
"""
(property) Total number of files in the revision.
"""
return len(self.files) | class_definition | 3,330 | 5,809 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_cache_manager.py | null | 134 |
class CachedRepoInfo:
"""Frozen data structure holding information about a cached repository.
Args:
repo_id (`str`):
Repo id of the repo on the Hub. Example: `"google/fleurs"`.
repo_type (`Literal["dataset", "model", "space"]`):
Type of the cached repo.
repo_path (`Path`):
Local path to the cached repo.
size_on_disk (`int`):
Sum of the blob file sizes in the cached repo.
nb_files (`int`):
Total number of blob files in the cached repo.
revisions (`FrozenSet[CachedRevisionInfo]`):
Set of [`~CachedRevisionInfo`] describing all revisions cached in the repo.
last_accessed (`float`):
Timestamp of the last time a blob file of the repo has been accessed.
last_modified (`float`):
Timestamp of the last time a blob file of the repo has been modified/created.
<Tip warning={true}>
`size_on_disk` is not necessarily the sum of all revisions sizes because of
duplicated files. Besides, only blobs are taken into account, not the (negligible)
size of folders and symlinks.
</Tip>
<Tip warning={true}>
`last_accessed` and `last_modified` reliability can depend on the OS you are using.
See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
for more details.
</Tip>
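Example:
An illustrative sketch using the `refs` property to look up the revision currently pointed to by `main`, if any:
```py
>>> from huggingface_hub import scan_cache_dir
>>> for repo in scan_cache_dir().repos:
...     main_revision = repo.refs.get("main")  # CachedRevisionInfo or None
...     if main_revision is not None:
...         print(repo.repo_id, main_revision.commit_hash, repo.size_on_disk_str)
```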
"""
repo_id: str
repo_type: REPO_TYPE_T
repo_path: Path
size_on_disk: int
nb_files: int
revisions: FrozenSet[CachedRevisionInfo]
last_accessed: float
last_modified: float
@property
def last_accessed_str(self) -> str:
"""
(property) Last time a blob file of the repo has been accessed, returned as a
human-readable string.
Example: "2 weeks ago".
"""
return _format_timesince(self.last_accessed)
@property
def last_modified_str(self) -> str:
"""
(property) Last time a blob file of the repo has been modified, returned as a
human-readable string.
Example: "2 weeks ago".
"""
return _format_timesince(self.last_modified)
@property
def size_on_disk_str(self) -> str:
"""
(property) Sum of the blob file sizes as a human-readable string.
Example: "42.2K".
"""
return _format_size(self.size_on_disk)
@property
def refs(self) -> Dict[str, CachedRevisionInfo]:
"""
(property) Mapping between `refs` and revision data structures.
"""
return {ref: revision for revision in self.revisions for ref in revision.refs} | class_definition | 5,836 | 8,490 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_cache_manager.py | null | 135 |
class DeleteCacheStrategy:
"""Frozen data structure holding the strategy to delete cached revisions.
This object is not meant to be instantiated programmatically but to be returned by
[`~utils.HFCacheInfo.delete_revisions`]. See documentation for usage example.
Args:
expected_freed_size (`float`):
Expected freed size once strategy is executed.
blobs (`FrozenSet[Path]`):
Set of blob file paths to be deleted.
refs (`FrozenSet[Path]`):
Set of reference file paths to be deleted.
repos (`FrozenSet[Path]`):
Set of entire repo paths to be deleted.
snapshots (`FrozenSet[Path]`):
Set of snapshots to be deleted (directory of symlinks).
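Example:
An illustrative sketch; the strategy is obtained from [`~utils.HFCacheInfo.delete_revisions`] and can be inspected as a dry run before calling `execute` (the revision hash is the one used in the example below):
```py
>>> from huggingface_hub import scan_cache_dir
>>> strategy = scan_cache_dir().delete_revisions("81fd1d6e7847c99f5862c9fb81387956d99ec7aa")
>>> print(f"Would free {strategy.expected_freed_size_str} ({len(strategy.blobs)} blob(s)).")
>>> strategy.execute()  # irreversible
```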
"""
expected_freed_size: int
blobs: FrozenSet[Path]
refs: FrozenSet[Path]
repos: FrozenSet[Path]
snapshots: FrozenSet[Path]
@property
def expected_freed_size_str(self) -> str:
"""
(property) Expected size that will be freed as a human-readable string.
Example: "42.2K".
"""
return _format_size(self.expected_freed_size)
def execute(self) -> None:
"""Execute the defined strategy.
<Tip warning={true}>
If this method is interrupted, the cache might get corrupted. Deletion order is
implemented so that references and symlinks are deleted before the actual blob
files.
</Tip>
<Tip warning={true}>
This method is irreversible. If executed, cached files are erased and must be
downloaded again.
</Tip>
"""
# Deletion order matters. Blobs are deleted last so that the user can't end
# up in a state where a `ref` refers to a missing snapshot or a snapshot
# symlink refers to a deleted blob.
# Delete entire repos
for path in self.repos:
_try_delete_path(path, path_type="repo")
# Delete snapshot directories
for path in self.snapshots:
_try_delete_path(path, path_type="snapshot")
# Delete refs files
for path in self.refs:
_try_delete_path(path, path_type="ref")
# Delete blob files
for path in self.blobs:
_try_delete_path(path, path_type="blob")
logger.info(f"Cache deletion done. Saved {self.expected_freed_size_str}.") | class_definition | 8,517 | 10,915 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_cache_manager.py | null | 136 |
class HFCacheInfo:
"""Frozen data structure holding information about the entire cache-system.
This data structure is returned by [`scan_cache_dir`] and is immutable.
Args:
size_on_disk (`int`):
Sum of all valid repo sizes in the cache-system.
repos (`FrozenSet[CachedRepoInfo]`):
Set of [`~CachedRepoInfo`] describing all valid cached repos found on the
cache-system while scanning.
warnings (`List[CorruptedCacheException]`):
List of [`~CorruptedCacheException`] that occurred while scanning the cache.
Those exceptions are captured so that the scan can continue. Corrupted repos
are skipped from the scan.
<Tip warning={true}>
Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However, if
some cached repos are corrupted, their sizes are not taken into account.
</Tip>
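Example:
An illustrative sketch showing a typical scan, including the corruption warnings captured during it:
```py
>>> from huggingface_hub import scan_cache_dir
>>> cache_info = scan_cache_dir()
>>> print(f"{len(cache_info.repos)} repo(s) cached, {cache_info.size_on_disk_str} on disk.")
>>> for warning in cache_info.warnings:
...     print(f"Skipped corrupted cache entry: {warning}")
```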
"""
size_on_disk: int
repos: FrozenSet[CachedRepoInfo]
warnings: List[CorruptedCacheException]
@property
def size_on_disk_str(self) -> str:
"""
(property) Sum of all valid repo sizes in the cache-system as a human-readable
string.
Example: "42.2K".
"""
return _format_size(self.size_on_disk)
def delete_revisions(self, *revisions: str) -> DeleteCacheStrategy:
"""Prepare the strategy to delete one or more revisions cached locally.
Input revisions can be any revision hash. If a revision hash is not found in the
local cache, a warning is thrown but no error is raised. Revisions can be from
different cached repos since hashes are unique across repos.
Examples:
```py
>>> from huggingface_hub import scan_cache_dir
>>> cache_info = scan_cache_dir()
>>> delete_strategy = cache_info.delete_revisions(
... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa"
... )
>>> print(f"Will free {delete_strategy.expected_freed_size_str}.")
Will free 7.9K.
>>> delete_strategy.execute()
Cache deletion done. Saved 7.9K.
```
```py
>>> from huggingface_hub import scan_cache_dir
>>> scan_cache_dir().delete_revisions(
... "81fd1d6e7847c99f5862c9fb81387956d99ec7aa",
... "e2983b237dccf3ab4937c97fa717319a9ca1a96d",
... "6c0e6080953db56375760c0471a8c5f2929baf11",
... ).execute()
Cache deletion done. Saved 8.6G.
```
<Tip warning={true}>
`delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to
be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but
allows having a dry run before actually executing the deletion.
</Tip>
"""
hashes_to_delete: Set[str] = set(revisions)
repos_with_revisions: Dict[CachedRepoInfo, Set[CachedRevisionInfo]] = defaultdict(set)
for repo in self.repos:
for revision in repo.revisions:
if revision.commit_hash in hashes_to_delete:
repos_with_revisions[repo].add(revision)
hashes_to_delete.remove(revision.commit_hash)
if len(hashes_to_delete) > 0:
logger.warning(f"Revision(s) not found - cannot delete them: {', '.join(hashes_to_delete)}")
delete_strategy_blobs: Set[Path] = set()
delete_strategy_refs: Set[Path] = set()
delete_strategy_repos: Set[Path] = set()
delete_strategy_snapshots: Set[Path] = set()
delete_strategy_expected_freed_size = 0
for affected_repo, revisions_to_delete in repos_with_revisions.items():
other_revisions = affected_repo.revisions - revisions_to_delete
# If no other revisions, it means all revisions are deleted
# -> delete the entire cached repo
if len(other_revisions) == 0:
delete_strategy_repos.add(affected_repo.repo_path)
delete_strategy_expected_freed_size += affected_repo.size_on_disk
continue
# Some revisions of the repo will be deleted but not all. We need to filter
# which blob files will not be linked anymore.
for revision_to_delete in revisions_to_delete:
# Snapshot dir
delete_strategy_snapshots.add(revision_to_delete.snapshot_path)
# Refs dir
for ref in revision_to_delete.refs:
delete_strategy_refs.add(affected_repo.repo_path / "refs" / ref)
# Blobs dir
for file in revision_to_delete.files:
if file.blob_path not in delete_strategy_blobs:
is_file_alone = True
for revision in other_revisions:
for rev_file in revision.files:
if file.blob_path == rev_file.blob_path:
is_file_alone = False
break
if not is_file_alone:
break
# Blob file not referenced by remaining revisions -> delete
if is_file_alone:
delete_strategy_blobs.add(file.blob_path)
delete_strategy_expected_freed_size += file.size_on_disk
# Return the strategy instead of executing it.
return DeleteCacheStrategy(
blobs=frozenset(delete_strategy_blobs),
refs=frozenset(delete_strategy_refs),
repos=frozenset(delete_strategy_repos),
snapshots=frozenset(delete_strategy_snapshots),
expected_freed_size=delete_strategy_expected_freed_size,
)
def export_as_table(self, *, verbosity: int = 0) -> str:
"""Generate a table from the [`HFCacheInfo`] object.
Pass `verbosity=0` to get a table with a single row per repo, with columns
"repo_id", "repo_type", "size_on_disk", "nb_files", "last_accessed", "last_modified", "refs", "local_path".
Pass `verbosity=1` to get a table with a row per repo and revision (thus multiple rows can appear for a single repo), with columns
"repo_id", "repo_type", "revision", "size_on_disk", "nb_files", "last_modified", "refs", "local_path".
Example:
```py
>>> from huggingface_hub.utils import scan_cache_dir
>>> hf_cache_info = scan_cache_dir()
HFCacheInfo(...)
>>> print(hf_cache_info.export_as_table())
REPO ID REPO TYPE SIZE ON DISK NB FILES LAST_ACCESSED LAST_MODIFIED REFS LOCAL PATH
--------------------------------------------------- --------- ------------ -------- ------------- ------------- ---- --------------------------------------------------------------------------------------------------
roberta-base model 2.7M 5 1 day ago 1 week ago main ~/.cache/huggingface/hub/models--roberta-base
suno/bark model 8.8K 1 1 week ago 1 week ago main ~/.cache/huggingface/hub/models--suno--bark
t5-base model 893.8M 4 4 days ago 7 months ago main ~/.cache/huggingface/hub/models--t5-base
t5-large model 3.0G 4 5 weeks ago 5 months ago main ~/.cache/huggingface/hub/models--t5-large
>>> print(hf_cache_info.export_as_table(verbosity=1))
REPO ID REPO TYPE REVISION SIZE ON DISK NB FILES LAST_MODIFIED REFS LOCAL PATH
--------------------------------------------------- --------- ---------------------------------------- ------------ -------- ------------- ---- -----------------------------------------------------------------------------------------------------------------------------------------------------
roberta-base model e2da8e2f811d1448a5b465c236feacd80ffbac7b 2.7M 5 1 week ago main ~/.cache/huggingface/hub/models--roberta-base/snapshots/e2da8e2f811d1448a5b465c236feacd80ffbac7b
suno/bark model 70a8a7d34168586dc5d028fa9666aceade177992 8.8K 1 1 week ago main ~/.cache/huggingface/hub/models--suno--bark/snapshots/70a8a7d34168586dc5d028fa9666aceade177992
t5-base model a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1 893.8M 4 7 months ago main ~/.cache/huggingface/hub/models--t5-base/snapshots/a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1
t5-large model 150ebc2c4b72291e770f58e6057481c8d2ed331a 3.0G 4 5 months ago main ~/.cache/huggingface/hub/models--t5-large/snapshots/150ebc2c4b72291e770f58e6057481c8d2ed331a
```
Args:
verbosity (`int`, *optional*):
The verbosity level. Defaults to 0.
Returns:
`str`: The table as a string.
"""
if verbosity == 0:
return tabulate(
rows=[
[
repo.repo_id,
repo.repo_type,
"{:>12}".format(repo.size_on_disk_str),
repo.nb_files,
repo.last_accessed_str,
repo.last_modified_str,
", ".join(sorted(repo.refs)),
str(repo.repo_path),
]
for repo in sorted(self.repos, key=lambda repo: repo.repo_path)
],
headers=[
"REPO ID",
"REPO TYPE",
"SIZE ON DISK",
"NB FILES",
"LAST_ACCESSED",
"LAST_MODIFIED",
"REFS",
"LOCAL PATH",
],
)
else:
return tabulate(
rows=[
[
repo.repo_id,
repo.repo_type,
revision.commit_hash,
"{:>12}".format(revision.size_on_disk_str),
revision.nb_files,
revision.last_modified_str,
", ".join(sorted(revision.refs)),
str(revision.snapshot_path),
]
for repo in sorted(self.repos, key=lambda repo: repo.repo_path)
for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash)
],
headers=[
"REPO ID",
"REPO TYPE",
"REVISION",
"SIZE ON DISK",
"NB FILES",
"LAST_MODIFIED",
"REFS",
"LOCAL PATH",
],
) | class_definition | 10,942 | 22,279 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/utils/_cache_manager.py | null | 137 |
class ModelStatus:
"""
This dataclass represents the model status in the Hugging Face Inference API.
Args:
loaded (`bool`):
Whether the model is currently loaded into Hugging Face's Inference API. Models
are loaded on-demand, leading to the user's first request taking longer.
If a model is loaded, you can be assured that it is in a healthy state.
state (`str`):
The current state of the model. This can be 'Loaded', 'Loadable', or 'TooBig'.
If a model's state is 'Loadable', it's not too big and has a supported
backend. Loadable models are automatically loaded when the user first
requests inference on the endpoint. This means it is transparent for the
user to load a model, except that the first call takes longer to complete.
compute_type (`Dict`):
Information about the compute resource the model is using or will use, such as 'gpu' type and number of
replicas.
framework (`str`):
The name of the framework that the model was built with, such as 'transformers'
or 'text-generation-inference'.
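Example:
An illustrative sketch; a [`ModelStatus`] is typically obtained via `InferenceClient.get_model_status`, and the output values below are indicative only:
```py
>>> from huggingface_hub import InferenceClient
>>> status = InferenceClient().get_model_status("meta-llama/Meta-Llama-3-8B-Instruct")
>>> status.loaded, status.state, status.framework
(True, 'Loaded', 'text-generation-inference')
```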
"""
loaded: bool
state: str
compute_type: Dict
framework: str | class_definition | 2,010 | 3,272 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_common.py | null | 138 |
class InferenceClient:
"""
Initialize a new Inference Client.
[`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used
seamlessly with either the (free) Inference API or self-hosted Inference Endpoints.
Args:
model (`str`, `optional`):
The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is
automatically selected for the task.
Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
arguments are mutually exclusive. If using `base_url` for chat completion, the `/chat/completions` suffix
path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
token (`str` or `bool`, *optional*):
Hugging Face token. Will default to the locally saved token if not provided.
Pass `token=False` if you don't want to send your token to the server.
Note: for better compatibility with OpenAI's client, `token` has been aliased as `api_key`. Those 2
arguments are mutually exclusive and have the exact same behavior.
timeout (`float`, `optional`):
The maximum number of seconds to wait for a response from the server. Loading a new model in Inference
API can take up to several minutes. Defaults to None, meaning it will loop until the server is available.
headers (`Dict[str, str]`, `optional`):
Additional headers to send to the server. By default only the authorization and user-agent headers are sent.
Values in this dictionary will override the default values.
cookies (`Dict[str, str]`, `optional`):
Additional cookies to send to the server.
proxies (`Any`, `optional`):
Proxies to use for the request.
base_url (`str`, `optional`):
Base URL to run inference. This is a duplicated argument from `model` to make [`InferenceClient`]
follow the same pattern as `openai.OpenAI` client. Cannot be used if `model` is set. Defaults to None.
api_key (`str`, `optional`):
Token to use for authentication. This is a duplicated argument from `token` to make [`InferenceClient`]
follow the same pattern as `openai.OpenAI` client. Cannot be used if `token` is set. Defaults to None.
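Example:
A minimal illustrative sketch of the two supported ways to instantiate the client (model id, or OpenAI-style `base_url`/`api_key`; the endpoint URL and token below are placeholders):
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", timeout=60)
>>> # OpenAI-compatible style (placeholder endpoint URL and token):
>>> client = InferenceClient(base_url="https://my-endpoint.example/v1", api_key="hf_***")
```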
"""
def __init__(
self,
model: Optional[str] = None,
*,
token: Union[str, bool, None] = None,
timeout: Optional[float] = None,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
proxies: Optional[Any] = None,
# OpenAI compatibility
base_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
if model is not None and base_url is not None:
raise ValueError(
"Received both `model` and `base_url` arguments. Please provide only one of them."
" `base_url` is an alias for `model` to make the API compatible with OpenAI's client."
" If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url."
" When passing a URL as `model`, the client will not append any suffix path to it."
)
if token is not None and api_key is not None:
raise ValueError(
"Received both `token` and `api_key` arguments. Please provide only one of them."
" `api_key` is an alias for `token` to make the API compatible with OpenAI's client."
" It has the exact same behavior as `token`."
)
self.model: Optional[str] = model
self.token: Union[str, bool, None] = token if token is not None else api_key
self.headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
build_hf_headers(token=self.token) # 'authorization' + 'user-agent'
)
if headers is not None:
self.headers.update(headers)
self.cookies = cookies
self.timeout = timeout
self.proxies = proxies
# OpenAI compatibility
self.base_url = base_url
def __repr__(self):
return f"<InferenceClient(model='{self.model if self.model else ''}', timeout={self.timeout})>"
@overload
def post( # type: ignore[misc]
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: Literal[False] = ...,
) -> bytes: ...
@overload
def post( # type: ignore[misc]
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: Literal[True] = ...,
) -> Iterable[bytes]: ...
@overload
def post(
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: bool = False,
) -> Union[bytes, Iterable[bytes]]: ...
def post(
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: bool = False,
) -> Union[bytes, Iterable[bytes]]:
"""
Make a POST request to the inference server.
Args:
json (`Union[str, Dict, List]`, *optional*):
The JSON data to send in the request body, specific to each task. Defaults to None.
data (`Union[str, Path, bytes, BinaryIO]`, *optional*):
The content to send in the request body, specific to each task.
It can be raw bytes, a pointer to an opened file, a local file path,
or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed,
`data` will take precedence. At least `json` or `data` must be provided. Defaults to None.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. Will override the model defined at the instance level. Defaults to None.
task (`str`, *optional*):
The task to perform. All available tasks can be found
[here](https://huggingface.co/tasks). Used only to default to a recommended model if `model` is not
provided. At least `model` or `task` must be provided. Defaults to None.
stream (`bool`, *optional*):
Whether to iterate over streaming APIs.
Returns:
bytes: The raw bytes returned by the server.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
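Example:
An illustrative sketch of a raw call; the returned bytes are task-specific JSON that the higher-level task methods normally parse for you (output abridged):
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> raw = client.post(json={"inputs": "Hello world!"}, task="text-classification")
>>> raw  # raw JSON bytes, abridged
b'[[{"label":"POSITIVE","score":0.99...'
```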
"""
url = self._resolve_url(model, task)
if data is not None and json is not None:
warnings.warn("Ignoring `json` as `data` is passed as binary.")
# Set Accept header if relevant
headers = self.headers.copy()
if task in TASKS_EXPECTING_IMAGES and "Accept" not in headers:
headers["Accept"] = "image/png"
t0 = time.time()
timeout = self.timeout
while True:
with _open_as_binary(data) as data_as_binary:
try:
response = get_session().post(
url,
json=json,
data=data_as_binary,
headers=headers,
cookies=self.cookies,
timeout=self.timeout,
stream=stream,
proxies=self.proxies,
)
except TimeoutError as error:
# Convert any `TimeoutError` to a `InferenceTimeoutError`
raise InferenceTimeoutError(f"Inference call timed out: {url}") from error # type: ignore
try:
hf_raise_for_status(response)
return response.iter_lines() if stream else response.content
except HTTPError as error:
if error.response.status_code == 422 and task is not None:
error.args = (
f"{error.args[0]}\nMake sure '{task}' task is supported by the model.",
) + error.args[1:]
if error.response.status_code == 503:
# If Model is unavailable, either raise a TimeoutError...
if timeout is not None and time.time() - t0 > timeout:
raise InferenceTimeoutError(
f"Model not loaded on the server: {url}. Please retry with a higher timeout (current:"
f" {self.timeout}).",
request=error.request,
response=error.response,
) from error
# ...or wait 1s and retry
logger.info(f"Waiting for model to be loaded on the server: {error}")
time.sleep(1)
if "X-wait-for-model" not in headers and url.startswith(INFERENCE_ENDPOINT):
headers["X-wait-for-model"] = "1"
if timeout is not None:
timeout = max(self.timeout - (time.time() - t0), 1) # type: ignore
continue
raise
def audio_classification(
self,
audio: ContentT,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
function_to_apply: Optional["AudioClassificationOutputTransform"] = None,
) -> List[AudioClassificationOutputElement]:
"""
Perform audio classification on the provided audio content.
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an
audio file.
model (`str`, *optional*):
The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub
or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
audio classification will be used.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
function_to_apply (`"AudioClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
Returns:
`List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.audio_classification("audio.flac")
[
AudioClassificationOutputElement(score=0.4976358711719513, label='hap'),
AudioClassificationOutputElement(score=0.3677836060523987, label='neu'),
...
]
```
"""
parameters = {"function_to_apply": function_to_apply, "top_k": top_k}
payload = _prepare_payload(audio, parameters=parameters, expect_binary=True)
response = self.post(**payload, model=model, task="audio-classification")
return AudioClassificationOutputElement.parse_obj_as_list(response)
def audio_to_audio(
self,
audio: ContentT,
*,
model: Optional[str] = None,
) -> List[AudioToAudioOutputElement]:
"""
Performs multiple audio-to-audio tasks depending on the model (e.g. speech enhancement, source separation).
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an
audio file.
model (`str`, *optional*):
The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub
or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
audio_to_audio will be used.
Returns:
`List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing the audio label, content-type, and audio content as a blob.
Raises:
`InferenceTimeoutError`:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> audio_output = client.audio_to_audio("audio.flac")
>>> for i, item in enumerate(audio_output):
...     with open(f"output_{i}.flac", "wb") as f:
...         f.write(item.blob)
```
"""
response = self.post(data=audio, model=model, task="audio-to-audio")
audio_output = AudioToAudioOutputElement.parse_obj_as_list(response)
for item in audio_output:
item.blob = base64.b64decode(item.blob)
return audio_output
def automatic_speech_recognition(
self,
audio: ContentT,
*,
model: Optional[str] = None,
) -> AutomaticSpeechRecognitionOutput:
"""
Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The content to transcribe. It can be raw audio bytes, a local audio file, or a URL to an audio file.
model (`str`, *optional*):
The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for ASR will be used.
Returns:
[`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.automatic_speech_recognition("hello_world.flac").text
"hello world"
```
"""
response = self.post(data=audio, model=model, task="automatic-speech-recognition")
return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response)
@overload
def chat_completion( # type: ignore
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: Literal[False] = False,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> ChatCompletionOutput: ...
@overload
def chat_completion( # type: ignore
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: Literal[True] = True,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> Iterable[ChatCompletionStreamOutput]: ...
@overload
def chat_completion(
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: bool = False,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: ...
def chat_completion(
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: bool = False,
# Parameters from ChatCompletionInput (handled manually)
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]:
"""
A method for completing conversations using a specified language model.
<Tip>
The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
Inputs and outputs are strictly the same and using either syntax will yield the same results.
Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
for more details about OpenAI's compatibility.
</Tip>
Args:
messages (List of [`ChatCompletionInputMessage`]):
Conversation history consisting of roles and content pairs.
model (`str`, *optional*):
The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used.
See https://huggingface.co/tasks/text-generation for more details.
If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a
custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`].
frequency_penalty (`float`, *optional*):
Penalizes new tokens based on their existing frequency
in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0.
logit_bias (`List[float]`, *optional*):
Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
result in a ban or exclusive selection of the relevant token. Defaults to None.
logprobs (`bool`, *optional*):
Whether to return log probabilities of the output tokens or not. If true, returns the log
probabilities of each output token returned in the content of message.
max_tokens (`int`, *optional*):
Maximum number of tokens allowed in the response. Defaults to 100.
n (`int`, *optional*):
UNUSED.
presence_penalty (`float`, *optional*):
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
text so far, increasing the model's likelihood to talk about new topics.
response_format ([`ChatCompletionInputGrammarType`], *optional*):
Grammar constraints. Can be either a JSONSchema or a regex.
seed (Optional[`int`], *optional*):
Seed for reproducible control flow. Defaults to None.
stop (`List[str]`, *optional*):
Up to four strings which trigger the end of the response.
Defaults to None.
stream (`bool`, *optional*):
Enable realtime streaming of responses. Defaults to False.
stream_options ([`ChatCompletionInputStreamOptions`], *optional*):
Options for streaming completions.
temperature (`float`, *optional*):
Controls randomness of the generations. Lower values ensure
less random completions. Range: [0, 2]. Defaults to 1.0.
top_logprobs (`int`, *optional*):
An integer between 0 and 5 specifying the number of most likely tokens to return at each token
position, each with an associated log probability. logprobs must be set to true if this parameter is
used.
top_p (`float`, *optional*):
Fraction of the most likely next words to sample from.
Must be between 0 and 1. Defaults to 1.0.
tool_choice ([`ChatCompletionInputToolChoiceClass`] or [`ChatCompletionInputToolChoiceEnum`], *optional*):
The tool to use for the completion. Defaults to "auto".
tool_prompt (`str`, *optional*):
A prompt to be appended before the tools.
tools (List of [`ChatCompletionInputTool`], *optional*):
A list of tools the model may call. Currently, only functions are supported as a tool. Use this to
provide a list of functions the model may generate JSON inputs for.
Returns:
[`ChatCompletionOutput`] or Iterable of [`ChatCompletionStreamOutput`]:
Generated text returned from the server:
- if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default).
- if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`].
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> messages = [{"role": "user", "content": "What is the capital of France?"}]
>>> client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
>>> client.chat_completion(messages, max_tokens=100)
ChatCompletionOutput(
choices=[
ChatCompletionOutputComplete(
finish_reason='eos_token',
index=0,
message=ChatCompletionOutputMessage(
role='assistant',
content='The capital of France is Paris.',
name=None,
tool_calls=None
),
logprobs=None
)
],
created=1719907176,
id='',
model='meta-llama/Meta-Llama-3-8B-Instruct',
object='text_completion',
system_fingerprint='2.0.4-sha-f426a33',
usage=ChatCompletionOutputUsage(
completion_tokens=8,
prompt_tokens=17,
total_tokens=25
)
)
```
Example using streaming:
```py
>>> from huggingface_hub import InferenceClient
>>> messages = [{"role": "user", "content": "What is the capital of France?"}]
>>> client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
>>> for token in client.chat_completion(messages, max_tokens=10, stream=True):
... print(token)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content='The', role='assistant'), index=0, finish_reason=None)], created=1710498504)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' capital', role='assistant'), index=0, finish_reason=None)], created=1710498504)
(...)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' may', role='assistant'), index=0, finish_reason=None)], created=1710498504)
```
Example using OpenAI's syntax:
```py
# instead of `from openai import OpenAI`
from huggingface_hub import InferenceClient
# instead of `client = OpenAI(...)`
client = InferenceClient(
base_url=...,
api_key=...,
)
output = client.chat.completions.create(
model="meta-llama/Meta-Llama-3-8B-Instruct",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Count to 10"},
],
stream=True,
max_tokens=1024,
)
for chunk in output:
print(chunk.choices[0].delta.content)
```
Example using Image + Text as input:
```py
>>> from huggingface_hub import InferenceClient
# provide a remote URL
>>> image_url ="https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
# or a base64-encoded image
>>> image_path = "/path/to/image.jpeg"
>>> with open(image_path, "rb") as f:
... base64_image = base64.b64encode(f.read()).decode("utf-8")
>>> image_url = f"data:image/jpeg;base64,{base64_image}"
>>> client = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
>>> output = client.chat.completions.create(
... messages=[
... {
... "role": "user",
... "content": [
... {
... "type": "image_url",
... "image_url": {"url": image_url},
... },
... {
... "type": "text",
... "text": "Describe this image in one sentence.",
... },
... ],
... },
... ],
... )
>>> output
The image depicts the iconic Statue of Liberty situated in New York Harbor, New York, on a clear day.
```
Example using tools:
```py
>>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> messages = [
... {
... "role": "system",
... "content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.",
... },
... {
... "role": "user",
... "content": "What's the weather like the next 3 days in San Francisco, CA?",
... },
... ]
>>> tools = [
... {
... "type": "function",
... "function": {
... "name": "get_current_weather",
... "description": "Get the current weather",
... "parameters": {
... "type": "object",
... "properties": {
... "location": {
... "type": "string",
... "description": "The city and state, e.g. San Francisco, CA",
... },
... "format": {
... "type": "string",
... "enum": ["celsius", "fahrenheit"],
... "description": "The temperature unit to use. Infer this from the users location.",
... },
... },
... "required": ["location", "format"],
... },
... },
... },
... {
... "type": "function",
... "function": {
... "name": "get_n_day_weather_forecast",
... "description": "Get an N-day weather forecast",
... "parameters": {
... "type": "object",
... "properties": {
... "location": {
... "type": "string",
... "description": "The city and state, e.g. San Francisco, CA",
... },
... "format": {
... "type": "string",
... "enum": ["celsius", "fahrenheit"],
... "description": "The temperature unit to use. Infer this from the users location.",
... },
... "num_days": {
... "type": "integer",
... "description": "The number of days to forecast",
... },
... },
... "required": ["location", "format", "num_days"],
... },
... },
... },
... ]
>>> response = client.chat_completion(
... model="meta-llama/Meta-Llama-3-70B-Instruct",
... messages=messages,
... tools=tools,
... tool_choice="auto",
... max_tokens=500,
... )
>>> response.choices[0].message.tool_calls[0].function
ChatCompletionOutputFunctionDefinition(
arguments={
'location': 'San Francisco, CA',
'format': 'fahrenheit',
'num_days': 3
},
name='get_n_day_weather_forecast',
description=None
)
```
Example using response_format:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> messages = [
... {
... "role": "user",
... "content": "I saw a puppy a cat and a raccoon during my bike ride in the park. What did I saw and when?",
... },
... ]
>>> response_format = {
... "type": "json",
... "value": {
... "properties": {
... "location": {"type": "string"},
... "activity": {"type": "string"},
... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
... "animals": {"type": "array", "items": {"type": "string"}},
... },
... "required": ["location", "activity", "animals_seen", "animals"],
... },
... }
>>> response = client.chat_completion(
... messages=messages,
... response_format=response_format,
... max_tokens=500,
)
>>> response.choices[0].message.content
'{\n\n"activity": "bike ride",\n"animals": ["puppy", "cat", "raccoon"],\n"animals_seen": 3,\n"location": "park"}'
```
"""
model_url = self._resolve_chat_completion_url(model)
# `model` is sent in the payload. Not used by the server but can be useful for debugging/routing.
# If it's an ID on the Hub => use it. Otherwise, use the dummy value "tgi".
model_id = model or self.model or "tgi"
if model_id.startswith(("http://", "https://")):
model_id = "tgi" # dummy value
payload = dict(
model=model_id,
messages=messages,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
response_format=response_format,
seed=seed,
stop=stop,
temperature=temperature,
tool_choice=tool_choice,
tool_prompt=tool_prompt,
tools=tools,
top_logprobs=top_logprobs,
top_p=top_p,
stream=stream,
stream_options=stream_options,
)
payload = {key: value for key, value in payload.items() if value is not None}
data = self.post(model=model_url, json=payload, stream=stream)
if stream:
return _stream_chat_completion_response(data) # type: ignore[arg-type]
return ChatCompletionOutput.parse_obj_as_instance(data) # type: ignore[arg-type]
def _resolve_chat_completion_url(self, model: Optional[str] = None) -> str:
# Since `chat_completion(..., model=xxx)` is also a payload parameter for the server, we need to handle 'model' differently.
# `self.base_url` and `self.model` take precedence over the 'model' argument only in `chat_completion`.
model_id_or_url = self.base_url or self.model or model or self.get_recommended_model("text-generation")
# Resolve URL if it's a model ID
model_url = (
model_id_or_url
if model_id_or_url.startswith(("http://", "https://"))
else self._resolve_url(model_id_or_url, task="text-generation")
)
# Strip trailing /
model_url = model_url.rstrip("/")
# Append /chat/completions if not already present
if model_url.endswith("/v1"):
model_url += "/chat/completions"
# Append /v1/chat/completions if not already present
if not model_url.endswith("/chat/completions"):
model_url += "/v1/chat/completions"
return model_url
def document_question_answering(
self,
image: ContentT,
question: str,
*,
model: Optional[str] = None,
doc_stride: Optional[int] = None,
handle_impossible_answer: Optional[bool] = None,
lang: Optional[str] = None,
max_answer_len: Optional[int] = None,
max_question_len: Optional[int] = None,
max_seq_len: Optional[int] = None,
top_k: Optional[int] = None,
word_boxes: Optional[List[Union[List[float], str]]] = None,
) -> List[DocumentQuestionAnsweringOutputElement]:
"""
Answer questions on document images.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
question (`str`):
Question to be answered.
model (`str`, *optional*):
The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used.
Defaults to None.
doc_stride (`int`, *optional*):
If the words in the document are too long to fit with the question for the model, it will be split in
several chunks with some overlap. This argument controls the size of that overlap.
handle_impossible_answer (`bool`, *optional*):
Whether to accept impossible as an answer.
lang (`str`, *optional*):
Language to use while running OCR. Defaults to English.
max_answer_len (`int`, *optional*):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_question_len (`int`, *optional*):
The maximum length of the question after tokenization. It will be truncated if needed.
max_seq_len (`int`, *optional*):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using doc_stride as overlap) if needed.
top_k (`int`, *optional*):
The number of answers to return (will be chosen by order of likelihood). Can return less than top_k
answers if there are not enough options available within the context.
word_boxes (`List[Union[List[float], str]]`, *optional*):
A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR
step and use the provided bounding boxes instead.
Returns:
`List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?")
[DocumentQuestionAnsweringOutputElement(answer='us-001', end=16, score=0.9999666213989258, start=16)]
```
"""
inputs: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
parameters = {
"doc_stride": doc_stride,
"handle_impossible_answer": handle_impossible_answer,
"lang": lang,
"max_answer_len": max_answer_len,
"max_question_len": max_question_len,
"max_seq_len": max_seq_len,
"top_k": top_k,
"word_boxes": word_boxes,
}
payload = _prepare_payload(inputs, parameters=parameters)
response = self.post(**payload, model=model, task="document-question-answering")
return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response)
def feature_extraction(
self,
text: str,
*,
normalize: Optional[bool] = None,
prompt_name: Optional[str] = None,
truncate: Optional[bool] = None,
truncation_direction: Optional[Literal["Left", "Right"]] = None,
model: Optional[str] = None,
) -> "np.ndarray":
"""
Generate embeddings for a given text.
Args:
text (`str`):
The text to embed.
model (`str`, *optional*):
The model to use for the feature extraction task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended feature extraction model will be used.
Defaults to None.
normalize (`bool`, *optional*):
Whether to normalize the embeddings or not.
Only available on server powered by Text-Embedding-Inference.
prompt_name (`str`, *optional*):
The name of the prompt that should be used for encoding. If not set, no prompt will be applied.
Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
For example, if ``prompt_name`` is "query" and ``prompts`` is {"query": "query: ", ...},
then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?"
because the prompt text will be prepended before any text to encode.
truncate (`bool`, *optional*):
Whether to truncate the embeddings or not.
Only available on server powered by Text-Embedding-Inference.
truncation_direction (`Literal["Left", "Right"]`, *optional*):
Which side of the input should be truncated when `truncate=True` is passed.
Returns:
`np.ndarray`: The embedding representing the input text as a float32 numpy array.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.feature_extraction("Hi, who are you?")
array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ],
[-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ],
...,
[ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32)
```
"""
parameters = {
"normalize": normalize,
"prompt_name": prompt_name,
"truncate": truncate,
"truncation_direction": truncation_direction,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(**payload, model=model, task="feature-extraction")
np = _import_numpy()
return np.array(_bytes_to_dict(response), dtype="float32")
def fill_mask(
self,
text: str,
*,
model: Optional[str] = None,
targets: Optional[List[str]] = None,
top_k: Optional[int] = None,
) -> List[FillMaskOutputElement]:
"""
Fill in a hole with a missing word (token to be precise).
Args:
text (`str`):
a string to be filled from, must contain the [MASK] token (check model card for exact name of the mask).
model (`str`, *optional*):
The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used.
targets (`List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocabulary. If the provided targets are not in the model vocab, they will be tokenized and the first
resulting token will be used (with a warning, and that might be slower).
top_k (`int`, *optional*):
When passed, overrides the number of predictions to return.
Returns:
`List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated
probability, token reference, and completed text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.fill_mask("The goal of life is <mask>.")
[
FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'),
FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.')
]
```
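Restricting candidates (a hedged sketch; scores depend on the model and the mask token may differ, check the model card):
```py
>>> client.fill_mask(
...     "The goal of life is <mask>.",
...     targets=[" happiness", " money"],  # scores are limited to these candidates
...     top_k=2,
... )
```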
"""
parameters = {"targets": targets, "top_k": top_k}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(**payload, model=model, task="fill-mask")
return FillMaskOutputElement.parse_obj_as_list(response)
def image_classification(
self,
image: ContentT,
*,
model: Optional[str] = None,
function_to_apply: Optional["ImageClassificationOutputTransform"] = None,
top_k: Optional[int] = None,
) -> List[ImageClassificationOutputElement]:
"""
Perform image classification on the given image using the specified model.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to classify. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used.
function_to_apply (`"ImageClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
Returns:
`List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
[ImageClassificationOutputElement(label='Blenheim spaniel', score=0.9779096841812134), ...]
```
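Limiting the number of predicted classes (a minimal sketch; labels and scores depend on the model):
```py
>>> client.image_classification(
...     "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
...     top_k=3,  # keep only the 3 most probable classes
... )
```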
"""
parameters = {"function_to_apply": function_to_apply, "top_k": top_k}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = self.post(**payload, model=model, task="image-classification")
return ImageClassificationOutputElement.parse_obj_as_list(response)
def image_segmentation(
self,
image: ContentT,
*,
model: Optional[str] = None,
mask_threshold: Optional[float] = None,
overlap_mask_area_threshold: Optional[float] = None,
subtask: Optional["ImageSegmentationSubtask"] = None,
threshold: Optional[float] = None,
) -> List[ImageSegmentationOutputElement]:
"""
Perform image segmentation on the given image using the specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to segment. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
mask_threshold (`float`, *optional*):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*):
Mask overlap threshold to eliminate small, disconnected segments.
subtask (`"ImageSegmentationSubtask"`, *optional*):
Segmentation task to be performed, depending on model capabilities.
threshold (`float`, *optional*):
Probability threshold to filter out predicted masks.
Returns:
`List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.image_segmentation("cat.jpg")
[ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=<PIL.PngImagePlugin.PngImageFile image mode=L size=400x300 at 0x7FDD2B129CC0>), ...]
```
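Filtering low-confidence masks (a hedged sketch; the returned masks depend on the model):
```py
>>> masks = client.image_segmentation("cat.jpg", threshold=0.9)
>>> masks[0].mask.save("first_mask.png")  # each mask is returned as a `PIL.Image`
```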
"""
parameters = {
"mask_threshold": mask_threshold,
"overlap_mask_area_threshold": overlap_mask_area_threshold,
"subtask": subtask,
"threshold": threshold,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = self.post(**payload, model=model, task="image-segmentation")
output = ImageSegmentationOutputElement.parse_obj_as_list(response)
for item in output:
item.mask = _b64_to_image(item.mask) # type: ignore [assignment]
return output
def image_to_image(
self,
image: ContentT,
prompt: Optional[str] = None,
*,
negative_prompt: Optional[List[str]] = None,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
model: Optional[str] = None,
target_size: Optional[ImageToImageTargetSize] = None,
**kwargs,
) -> "Image":
"""
Perform image-to-image translation using a specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for translation. It can be raw bytes, an image file, or a URL to an online image.
prompt (`str`, *optional*):
The text prompt to guide the image generation.
negative_prompt (`List[str]`, *optional*):
One or several prompts to guide what NOT to include in image generation.
num_inference_steps (`int`, *optional*):
For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher
quality image at the expense of slower inference.
guidance_scale (`float`, *optional*):
For diffusion models. A higher guidance scale value encourages the model to generate images closely
linked to the text prompt at the expense of lower image quality.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
target_size (`ImageToImageTargetSize`, *optional*):
The size in pixels of the output image.
Returns:
`Image`: The translated image.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> image = client.image_to_image("cat.jpg", prompt="turn the cat into a tiger")
>>> image.save("tiger.jpg")
```
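Tuning the diffusion parameters (a minimal sketch; the result depends on the underlying model):
```py
>>> image = client.image_to_image(
...     "cat.jpg",
...     prompt="turn the cat into a tiger",
...     negative_prompt=["blurry", "low quality"],
...     num_inference_steps=30,
...     guidance_scale=7.5,
... )
>>> image.save("tiger.jpg")
```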
"""
parameters = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"target_size": target_size,
"num_inference_steps": num_inference_steps,
"guidance_scale": guidance_scale,
**kwargs,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = self.post(**payload, model=model, task="image-to-image")
return _bytes_to_image(response)
def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput:
"""
Takes an input image and returns text.
Models can have very different outputs depending on your use case (image captioning, optical character recognition
(OCR), Pix2Struct, etc). Please have a look at the model card to learn more about a model's specificities.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
[`ImageToTextOutput`]: The generated text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.image_to_text("cat.jpg")
'a cat standing in a grassy field '
>>> client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
'a dog laying on the grass next to a flower pot '
```
"""
response = self.post(data=image, model=model, task="image-to-text")
output = ImageToTextOutput.parse_obj(response)
return output[0] if isinstance(output, list) else output
def list_deployed_models(
self, frameworks: Union[None, str, Literal["all"], List[str]] = None
) -> Dict[str, List[str]]:
"""
List models deployed on the Serverless Inference API service.
This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that
are supported and account for 95% of the hosted models. However, if you want a complete list of models you can
specify `frameworks="all"` as input. Alternatively, if you know beforehand which framework you are interested
in, you can also restrict the search to this one (e.g. `frameworks="text-generation-inference"`). The more
frameworks are checked, the more time it will take.
<Tip warning={true}>
This endpoint method does not return a live list of all models available for the Serverless Inference API service.
It searches over a cached list of models that were recently available and the list may not be up to date.
If you want to know the live status of a specific model, use [`~InferenceClient.get_model_status`].
</Tip>
<Tip>
This endpoint method is mostly useful for discoverability. If you already know which model you want to use and want to
check its availability, you can directly use [`~InferenceClient.get_model_status`].
</Tip>
Args:
frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
"all", all available frameworks will be tested. It is also possible to provide a single framework or a
custom set of frameworks to check.
Returns:
`Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs.
Example:
```python
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
# Discover zero-shot-classification models currently deployed
>>> models = client.list_deployed_models()
>>> models["zero-shot-classification"]
['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...]
# List from only 1 framework
>>> client.list_deployed_models("text-generation-inference")
{'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...}
```
"""
# Resolve which frameworks to check
if frameworks is None:
frameworks = MAIN_INFERENCE_API_FRAMEWORKS
elif frameworks == "all":
frameworks = ALL_INFERENCE_API_FRAMEWORKS
elif isinstance(frameworks, str):
frameworks = [frameworks]
frameworks = list(set(frameworks))
# Fetch them iteratively
models_by_task: Dict[str, List[str]] = {}
def _unpack_response(framework: str, items: List[Dict]) -> None:
for model in items:
if framework == "sentence-transformers":
# Model running with the `sentence-transformers` framework can work with both tasks even if not
# branded as such in the API response
models_by_task.setdefault("feature-extraction", []).append(model["model_id"])
models_by_task.setdefault("sentence-similarity", []).append(model["model_id"])
else:
models_by_task.setdefault(model["task"], []).append(model["model_id"])
for framework in frameworks:
response = get_session().get(f"{INFERENCE_ENDPOINT}/framework/{framework}", headers=self.headers)
hf_raise_for_status(response)
_unpack_response(framework, response.json())
# Sort alphabetically for discoverability and return
for task, models in models_by_task.items():
models_by_task[task] = sorted(set(models), key=lambda x: x.lower())
return models_by_task
def object_detection(
self, image: ContentT, *, model: Optional[str] = None, threshold: Optional[float] = None
) -> List[ObjectDetectionOutputElement]:
"""
Perform object detection on the given image using the specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to detect objects on. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used.
threshold (`float`, *optional*):
The probability necessary to make a prediction.
Returns:
`List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
`ValueError`:
If the request output is not a List.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.object_detection("people.jpg")
[ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...]
```
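Keeping only confident detections (a hedged sketch; boxes and scores depend on the model):
```py
>>> client.object_detection("people.jpg", threshold=0.9)
```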
"""
parameters = {
"threshold": threshold,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = self.post(**payload, model=model, task="object-detection")
return ObjectDetectionOutputElement.parse_obj_as_list(response)
def question_answering(
self,
question: str,
context: str,
*,
model: Optional[str] = None,
align_to_words: Optional[bool] = None,
doc_stride: Optional[int] = None,
handle_impossible_answer: Optional[bool] = None,
max_answer_len: Optional[int] = None,
max_question_len: Optional[int] = None,
max_seq_len: Optional[int] = None,
top_k: Optional[int] = None,
) -> Union[QuestionAnsweringOutputElement, List[QuestionAnsweringOutputElement]]:
"""
Retrieve the answer to a question from a given text.
Args:
question (`str`):
Question to be answered.
context (`str`):
The context of the question.
model (`str`, *optional*):
The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint.
align_to_words (`bool`, *optional*):
Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt
on non-space-separated languages (like Japanese or Chinese).
doc_stride (`int`, *optional*):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
handle_impossible_answer (`bool`, *optional*):
Whether to accept impossible as an answer.
max_answer_len (`int`, *optional*):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_question_len (`int`, *optional*):
The maximum length of the question after tokenization. It will be truncated if needed.
max_seq_len (`int`, *optional*):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.
top_k (`int`, *optional*):
The number of answers to return (will be chosen by order of likelihood). Note that fewer than
top_k answers are returned if there are not enough options available within the context.
Returns:
Union[`QuestionAnsweringOutputElement`, List[`QuestionAnsweringOutputElement`]]:
When top_k is 1 or not provided, it returns a single `QuestionAnsweringOutputElement`.
When top_k is greater than 1, it returns a list of `QuestionAnsweringOutputElement`.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.")
QuestionAnsweringOutputElement(answer='Clara', end=16, score=0.9326565265655518, start=11)
```
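Requesting several candidate answers (a minimal sketch; a list is returned as soon as `top_k` is greater than 1):
```py
>>> client.question_answering(
...     question="What's my name?",
...     context="My name is Clara and I live in Berkeley.",
...     top_k=2,  # returns a list of `QuestionAnsweringOutputElement`
... )
```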
"""
parameters = {
"align_to_words": align_to_words,
"doc_stride": doc_stride,
"handle_impossible_answer": handle_impossible_answer,
"max_answer_len": max_answer_len,
"max_question_len": max_question_len,
"max_seq_len": max_seq_len,
"top_k": top_k,
}
inputs: Dict[str, Any] = {"question": question, "context": context}
payload = _prepare_payload(inputs, parameters=parameters)
response = self.post(
**payload,
model=model,
task="question-answering",
)
# Parse the response as a single `QuestionAnsweringOutputElement` when top_k is 1 or not provided, or a list of `QuestionAnsweringOutputElement` to ensure backward compatibility.
output = QuestionAnsweringOutputElement.parse_obj(response)
return output
def sentence_similarity(
self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
) -> List[float]:
"""
Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
Args:
sentence (`str`):
The main sentence to compare to others.
other_sentences (`List[str]`):
The list of sentences to compare to.
model (`str`, *optional*):
The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used.
Defaults to None.
Returns:
`List[float]`: The similarity scores between the source sentence and each of the other sentences.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.sentence_similarity(
... "Machine learning is so easy.",
... other_sentences=[
... "Deep learning is so straightforward.",
... "This is so difficult, like rocket science.",
... "I can't believe how much I struggled with this.",
... ],
... )
[0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
```
"""
response = self.post(
json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}},
model=model,
task="sentence-similarity",
)
return _bytes_to_list(response)
@_deprecate_arguments(
version="0.29",
deprecated_args=["parameters"],
custom_message=(
"The `parameters` argument is deprecated and will be removed in a future version. "
"Provide individual parameters instead: `clean_up_tokenization_spaces`, `generate_parameters`, and `truncation`."
),
)
def summarization(
self,
text: str,
*,
parameters: Optional[Dict[str, Any]] = None,
model: Optional[str] = None,
clean_up_tokenization_spaces: Optional[bool] = None,
generate_parameters: Optional[Dict[str, Any]] = None,
truncation: Optional["SummarizationTruncationStrategy"] = None,
) -> SummarizationOutput:
"""
Generate a summary of a given text using a specified model.
Args:
text (`str`):
The input text to summarize.
parameters (`Dict[str, Any]`, *optional*):
Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task)
for more details.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for summarization will be used.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether to clean up the potential extra spaces in the text output.
generate_parameters (`Dict[str, Any]`, *optional*):
Additional parametrization of the text generation algorithm.
truncation (`"SummarizationTruncationStrategy"`, *optional*):
The truncation strategy to use.
Returns:
[`SummarizationOutput`]: The generated summary text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.summarization("The Eiffel tower...")
SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....")
```
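Using the individual parameters instead of the deprecated `parameters` dict (a minimal sketch; the summary text depends on the model):
```py
>>> client.summarization(
...     "The Eiffel tower...",
...     clean_up_tokenization_spaces=True,
... )
```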
"""
if parameters is None:
parameters = {
"clean_up_tokenization_spaces": clean_up_tokenization_spaces,
"generate_parameters": generate_parameters,
"truncation": truncation,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(**payload, model=model, task="summarization")
return SummarizationOutput.parse_obj_as_list(response)[0]
def table_question_answering(
self,
table: Dict[str, Any],
query: str,
*,
model: Optional[str] = None,
padding: Optional["Padding"] = None,
sequential: Optional[bool] = None,
truncation: Optional[bool] = None,
) -> TableQuestionAnsweringOutputElement:
"""
Retrieve the answer to a question from information given in a table.
Args:
table (`Dict[str, Any]`):
A table of data represented as a dict of lists where keys are the column headers and the lists hold the
column values; all lists must have the same length.
query (`str`):
The query in plain text that you want to ask the table.
model (`str`, *optional*):
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
Hub or a URL to a deployed Inference Endpoint.
padding (`"Padding"`, *optional*):
Activates and controls padding.
sequential (`bool`, *optional*):
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
inference to be done sequentially to extract relations within sequences, given their conversational
nature.
truncation (`bool`, *optional*):
Activates and controls truncation.
Returns:
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> query = "How many stars does the transformers repository have?"
>>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]}
>>> client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq")
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
```
"""
parameters = {
"padding": padding,
"sequential": sequential,
"truncation": truncation,
}
inputs = {
"query": query,
"table": table,
}
payload = _prepare_payload(inputs, parameters=parameters)
response = self.post(
**payload,
model=model,
task="table-question-answering",
)
return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response)
def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]:
"""
Classify a target category (a group) based on a set of attributes.
Args:
table (`Dict[str, Any]`):
Set of attributes to classify.
model (`str`, *optional*):
The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used.
Defaults to None.
Returns:
`List`: a list of labels, one per row in the initial table.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> table = {
... "fixed_acidity": ["7.4", "7.8", "10.3"],
... "volatile_acidity": ["0.7", "0.88", "0.32"],
... "citric_acid": ["0", "0", "0.45"],
... "residual_sugar": ["1.9", "2.6", "6.4"],
... "chlorides": ["0.076", "0.098", "0.073"],
... "free_sulfur_dioxide": ["11", "25", "5"],
... "total_sulfur_dioxide": ["34", "67", "13"],
... "density": ["0.9978", "0.9968", "0.9976"],
... "pH": ["3.51", "3.2", "3.23"],
... "sulphates": ["0.56", "0.68", "0.82"],
... "alcohol": ["9.4", "9.8", "12.6"],
... }
>>> client.tabular_classification(table=table, model="julien-c/wine-quality")
["5", "5", "5"]
```
"""
response = self.post(
json={"table": table},
model=model,
task="tabular-classification",
)
return _bytes_to_list(response)
def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]:
"""
Predict a numerical target value given a set of attributes/features in a table.
Args:
table (`Dict[str, Any]`):
Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical.
model (`str`, *optional*):
The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used.
Defaults to None.
Returns:
`List`: a list of predicted numerical target values.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> table = {
... "Height": ["11.52", "12.48", "12.3778"],
... "Length1": ["23.2", "24", "23.9"],
... "Length2": ["25.4", "26.3", "26.5"],
... "Length3": ["30", "31.2", "31.1"],
... "Species": ["Bream", "Bream", "Bream"],
... "Width": ["4.02", "4.3056", "4.6961"],
... }
>>> client.tabular_regression(table, model="scikit-learn/Fish-Weight")
[110, 120, 130]
```
"""
response = self.post(json={"table": table}, model=model, task="tabular-regression")
return _bytes_to_list(response)
def text_classification(
self,
text: str,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
function_to_apply: Optional["TextClassificationOutputTransform"] = None,
) -> List[TextClassificationOutputElement]:
"""
Perform text classification (e.g. sentiment-analysis) on the given text.
Args:
text (`str`):
A string to be classified.
model (`str`, *optional*):
The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used.
Defaults to None.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
function_to_apply (`"TextClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
Returns:
`List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.text_classification("I like you")
[
TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314),
TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069),
]
```
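Keeping only the most probable class (a minimal sketch; labels and scores depend on the model):
```py
>>> client.text_classification("I like you", top_k=1)
```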
"""
parameters = {
"function_to_apply": function_to_apply,
"top_k": top_k,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(
**payload,
model=model,
task="text-classification",
)
return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value]
@overload
def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[False] = ...,
stream: Literal[False] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> str: ...
@overload
def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: Literal[False] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> TextGenerationOutput: ...
@overload
def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[False] = ...,
stream: Literal[True] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Iterable[str]: ...
@overload
def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: Literal[True] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Iterable[TextGenerationStreamOutput]: ...
@overload
def text_generation(
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: bool = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Union[TextGenerationOutput, Iterable[TextGenerationStreamOutput]]: ...
def text_generation(
self,
prompt: str,
*,
details: bool = False,
stream: bool = False,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]:
"""
Given a prompt, generate the following text.
The API endpoint is expected to run with the `text-generation-inference` backend (TGI). This backend is the
go-to solution to run large language models at scale. However, for some smaller models (e.g. "gpt2") the
default `transformers` + `api-inference` solution is still in use. Both approaches expose very similar APIs, but
not exactly the same. This method is compatible with both approaches, but some parameters are only available for
`text-generation-inference`. If some parameters are ignored, a warning message is triggered but the process
continues correctly.
To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference.
<Tip>
If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
</Tip>
Args:
prompt (`str`):
Input text.
details (`bool`, *optional*):
By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
probabilities, seed, finish reason, etc.). Only available for models running with the
`text-generation-inference` backend.
stream (`bool`, *optional*):
By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
tokens to be returned. Only available for models running with the `text-generation-inference`
backend.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
adapter_id (`str`, *optional*):
Lora adapter id.
best_of (`int`, *optional*):
Generate best_of sequences and return the one with the highest token logprobs.
decoder_input_details (`bool`, *optional*):
Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
into account. Defaults to `False`.
do_sample (`bool`, *optional*):
Activate logits sampling
frequency_penalty (`float`, *optional*):
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in
the text so far, decreasing the model's likelihood to repeat the same line verbatim.
grammar ([`TextGenerationInputGrammarType`], *optional*):
Grammar constraints. Can be either a JSONSchema or a regex.
max_new_tokens (`int`, *optional*):
Maximum number of generated tokens. Defaults to 100.
repetition_penalty (`float`, *optional*):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`, *optional*):
Whether to prepend the prompt to the generated text
seed (`int`, *optional*):
Random sampling seed
stop (`List[str]`, *optional*):
Stop generating tokens if a member of `stop` is generated.
stop_sequences (`List[str]`, *optional*):
Deprecated argument. Use `stop` instead.
temperature (`float`, *optional*):
The value used to module the logits distribution.
top_n_tokens (`int`, *optional*):
Return information about the `top_n_tokens` most likely tokens at each generation step, instead of
just the sampled token.
top_k (`int`, *optional*):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`, *optional*):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`, *optional*):
Truncate input tokens to the given size.
typical_p (`float`, *optional*):
Typical Decoding mass.
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
watermark (`bool`, *optional*):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
Returns:
`Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]`:
Generated text returned from the server:
- if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
- if `stream=True` and `details=False`, the generated text is returned token by token as an `Iterable[str]`
- if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.TextGenerationOutput`]
- if `details=True` and `stream=True`, the generated text is returned token by token as an iterable of [`~huggingface_hub.TextGenerationStreamOutput`]
Raises:
`ValidationError`:
If input values are not valid. No HTTP call is made to the server.
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
# Case 1: generate text
>>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
'100% open source and built to be easy to use.'
# Case 2: iterate over the generated tokens. Useful for large generation.
>>> for token in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
... print(token)
100
%
open
source
and
built
to
be
easy
to
use
.
# Case 3: get more details about the generation process.
>>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
TextGenerationOutput(
generated_text='100% open source and built to be easy to use.',
details=TextGenerationDetails(
finish_reason='length',
generated_tokens=12,
seed=None,
prefill=[
TextGenerationPrefillOutputToken(id=487, text='The', logprob=None),
TextGenerationPrefillOutputToken(id=53789, text=' hugging', logprob=-13.171875),
(...)
TextGenerationPrefillOutputToken(id=204, text=' ', logprob=-7.0390625)
],
tokens=[
TokenElement(id=1425, text='100', logprob=-1.0175781, special=False),
TokenElement(id=16, text='%', logprob=-0.0463562, special=False),
(...)
TokenElement(id=25, text='.', logprob=-0.5703125, special=False)
],
best_of_sequences=None
)
)
# Case 4: iterate over the generated tokens with more details.
# Last object is more complete, containing the full generated text and the finish reason.
>>> for details in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True):
... print(details)
...
TextGenerationStreamOutput(token=TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(
id=25,
text='.',
logprob=-0.5703125,
special=False),
generated_text='100% open source and built to be easy to use.',
details=TextGenerationStreamOutputStreamDetails(finish_reason='length', generated_tokens=12, seed=None)
)
# Case 5: generate constrained output using grammar
>>> response = client.text_generation(
... prompt="I saw a puppy a cat and a raccoon during my bike ride in the park",
... model="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
... max_new_tokens=100,
... repetition_penalty=1.3,
... grammar={
... "type": "json",
... "value": {
... "properties": {
... "location": {"type": "string"},
... "activity": {"type": "string"},
... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
... "animals": {"type": "array", "items": {"type": "string"}},
... },
... "required": ["location", "activity", "animals_seen", "animals"],
... },
... },
... )
>>> json.loads(response)
{
"activity": "bike riding",
"animals": ["puppy", "cat", "raccoon"],
"animals_seen": 3,
"location": "park"
}
```
"""
if decoder_input_details and not details:
warnings.warn(
"`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that"
" the output from the server will be truncated."
)
decoder_input_details = False
if stop_sequences is not None:
warnings.warn(
"`stop_sequences` is a deprecated argument for `text_generation` task"
" and will be removed in version '0.28.0'. Use `stop` instead.",
FutureWarning,
)
if stop is None:
stop = stop_sequences # use deprecated arg if provided
# Build payload
parameters = {
"adapter_id": adapter_id,
"best_of": best_of,
"decoder_input_details": decoder_input_details,
"details": details,
"do_sample": do_sample,
"frequency_penalty": frequency_penalty,
"grammar": grammar,
"max_new_tokens": max_new_tokens,
"repetition_penalty": repetition_penalty,
"return_full_text": return_full_text,
"seed": seed,
"stop": stop if stop is not None else [],
"temperature": temperature,
"top_k": top_k,
"top_n_tokens": top_n_tokens,
"top_p": top_p,
"truncate": truncate,
"typical_p": typical_p,
"watermark": watermark,
}
parameters = {k: v for k, v in parameters.items() if v is not None}
payload = {
"inputs": prompt,
"parameters": parameters,
"stream": stream,
}
# Remove some parameters if not a TGI server
unsupported_kwargs = _get_unsupported_text_generation_kwargs(model)
if len(unsupported_kwargs) > 0:
# The server does not support some parameters
# => means it is not a TGI server
# => remove unsupported parameters and warn the user
ignored_parameters = []
for key in unsupported_kwargs:
if parameters.get(key):
ignored_parameters.append(key)
parameters.pop(key, None)
if len(ignored_parameters) > 0:
warnings.warn(
"API endpoint/model for text-generation is not served via TGI. Ignoring following parameters:"
f" {', '.join(ignored_parameters)}.",
UserWarning,
)
if details:
warnings.warn(
"API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
" be ignored meaning only the generated text will be returned.",
UserWarning,
)
details = False
if stream:
raise ValueError(
"API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
" Please pass `stream=False` as input."
)
# Handle errors separately for more precise error messages
try:
bytes_output = self.post(json=payload, model=model, task="text-generation", stream=stream) # type: ignore
except HTTPError as e:
match = MODEL_KWARGS_NOT_USED_REGEX.search(str(e))
if isinstance(e, BadRequestError) and match:
unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(",")]
_set_unsupported_text_generation_kwargs(model, unused_params)
return self.text_generation( # type: ignore
prompt=prompt,
details=details,
stream=stream,
model=model,
adapter_id=adapter_id,
best_of=best_of,
decoder_input_details=decoder_input_details,
do_sample=do_sample,
frequency_penalty=frequency_penalty,
grammar=grammar,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop,
temperature=temperature,
top_k=top_k,
top_n_tokens=top_n_tokens,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
)
raise_text_generation_error(e)
# Parse output
if stream:
return _stream_text_generation_response(bytes_output, details) # type: ignore
data = _bytes_to_dict(bytes_output) # type: ignore[arg-type]
# Data can be a single element (dict) or a list of dicts, in which case we select the first element.
if isinstance(data, list):
data = data[0]
return TextGenerationOutput.parse_obj_as_instance(data) if details else data["generated_text"]
def text_to_image(
self,
prompt: str,
*,
negative_prompt: Optional[List[str]] = None,
height: Optional[float] = None,
width: Optional[float] = None,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
model: Optional[str] = None,
scheduler: Optional[str] = None,
target_size: Optional[TextToImageTargetSize] = None,
seed: Optional[int] = None,
**kwargs,
) -> "Image":
"""
Generate an image based on a given text using a specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
prompt (`str`):
The prompt to generate an image from.
negative_prompt (`List[str]`, *optional*):
One or several prompts to guide what NOT to include in image generation.
height (`float`, *optional*):
The height in pixels of the image to generate.
width (`float`, *optional*):
The width in pixels of the image to generate.
num_inference_steps (`int`, *optional*):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*):
A higher guidance scale value encourages the model to generate images closely linked to the text
prompt, but values too high may cause saturation and other artifacts.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended text-to-image model will be used.
Defaults to None.
scheduler (`str`, *optional*):
Override the scheduler with a compatible one.
target_size (`TextToImageTargetSize`, *optional*):
The size in pixels of the output image.
seed (`int`, *optional*):
Seed for the random number generator.
Returns:
`Image`: The generated image.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> image = client.text_to_image("An astronaut riding a horse on the moon.")
>>> image.save("astronaut.png")
>>> image = client.text_to_image(
... "An astronaut riding a horse on the moon.",
... negative_prompt="low resolution, blurry",
... model="stabilityai/stable-diffusion-2-1",
... )
>>> image.save("better_astronaut.png")
```
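Tuning the generation parameters (a hedged sketch; the generated image depends on the model, and `seed` only makes the result reproducible if the backend honors it):
```py
>>> image = client.text_to_image(
...     "An astronaut riding a horse on the moon.",
...     num_inference_steps=30,
...     guidance_scale=7.5,
...     seed=42,
... )
>>> image.save("astronaut_seeded.png")
```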
"""
parameters = {
"negative_prompt": negative_prompt,
"height": height,
"width": width,
"num_inference_steps": num_inference_steps,
"guidance_scale": guidance_scale,
"scheduler": scheduler,
"target_size": target_size,
"seed": seed,
**kwargs,
}
payload = _prepare_payload(prompt, parameters=parameters)
response = self.post(**payload, model=model, task="text-to-image")
return _bytes_to_image(response)
def text_to_speech(
self,
text: str,
*,
model: Optional[str] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None,
epsilon_cutoff: Optional[float] = None,
eta_cutoff: Optional[float] = None,
max_length: Optional[int] = None,
max_new_tokens: Optional[int] = None,
min_length: Optional[int] = None,
min_new_tokens: Optional[int] = None,
num_beam_groups: Optional[int] = None,
num_beams: Optional[int] = None,
penalty_alpha: Optional[float] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
typical_p: Optional[float] = None,
use_cache: Optional[bool] = None,
) -> bytes:
"""
Synthesize audio of a voice pronouncing a given text.
Args:
text (`str`):
The text to synthesize.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended text-to-speech model will be used.
Defaults to None.
do_sample (`bool`, *optional*):
Whether to use sampling instead of greedy decoding when generating new tokens.
early_stopping (`Union[bool, "TextToSpeechEarlyStoppingEnum"]`, *optional*):
Controls the stopping condition for beam-based methods.
epsilon_cutoff (`float`, *optional*):
If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on
the size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://hf.co/papers/2210.15191) for more details.
eta_cutoff (`float`, *optional*):
Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly
between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff)
* exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token
probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3,
depending on the size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://hf.co/papers/2210.15191) for more details.
max_length (`int`, *optional*):
The maximum length (in tokens) of the generated text, including the input.
max_new_tokens (`int`, *optional*):
The maximum number of tokens to generate. Takes precedence over max_length.
min_length (`int`, *optional*):
The minimum length (in tokens) of the generated text, including the input.
min_new_tokens (`int`, *optional*):
The minimum number of tokens to generate. Takes precedence over min_length.
num_beam_groups (`int`, *optional*):
Number of groups to divide num_beams into in order to ensure diversity among different groups of beams.
See [this paper](https://hf.co/papers/1610.02424) for more details.
num_beams (`int`, *optional*):
Number of beams to use for beam search.
penalty_alpha (`float`, *optional*):
The value balances the model confidence and the degeneration penalty in contrastive search decoding.
temperature (`float`, *optional*):
The value used to modulate the next token probabilities.
top_k (`int`, *optional*):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`, *optional*):
If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
top_p or higher are kept for generation.
typical_p (`float`, *optional*):
Local typicality measures how similar the conditional probability of predicting a target token next is
to the expected conditional probability of predicting a random token next, given the partial text
already generated. If set to float < 1, the smallest set of the most locally typical tokens with
probabilities that add up to typical_p or higher are kept for generation. See [this
paper](https://hf.co/papers/2202.00666) for more details.
use_cache (`bool`, *optional*):
Whether the model should use the past key/values attentions to speed up decoding.
Returns:
`bytes`: The generated audio.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from pathlib import Path
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> audio = client.text_to_speech("Hello world")
>>> Path("hello_world.flac").write_bytes(audio)
```
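Enabling sampling-based decoding (a minimal sketch; supported parameters and the generated audio depend on the model):
```py
>>> from pathlib import Path
>>> audio = client.text_to_speech(
...     "Hello world",
...     do_sample=True,
...     temperature=0.7,
... )
>>> Path("hello_world_sampled.flac").write_bytes(audio)
```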
"""
parameters = {
"do_sample": do_sample,
"early_stopping": early_stopping,
"epsilon_cutoff": epsilon_cutoff,
"eta_cutoff": eta_cutoff,
"max_length": max_length,
"max_new_tokens": max_new_tokens,
"min_length": min_length,
"min_new_tokens": min_new_tokens,
"num_beam_groups": num_beam_groups,
"num_beams": num_beams,
"penalty_alpha": penalty_alpha,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"typical_p": typical_p,
"use_cache": use_cache,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(**payload, model=model, task="text-to-speech")
return response
def token_classification(
self,
text: str,
*,
model: Optional[str] = None,
aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None,
ignore_labels: Optional[List[str]] = None,
stride: Optional[int] = None,
) -> List[TokenClassificationOutputElement]:
"""
Perform token classification on the given text.
Usually used for sentence parsing, either grammatical or Named Entity Recognition (NER), to understand keywords contained within text.
Args:
text (`str`):
A string to be classified.
model (`str`, *optional*):
The model to use for the token classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended token classification model will be used.
Defaults to None.
aggregation_strategy (`"TokenClassificationAggregationStrategy"`, *optional*):
The strategy used to fuse tokens based on model predictions.
ignore_labels (`List[str]`, *optional*):
A list of labels to ignore.
stride (`int`, *optional*):
The number of overlapping tokens between chunks when splitting the input text.
Returns:
`List[TokenClassificationOutputElement]`: List of [`TokenClassificationOutputElement`] items containing the entity group, confidence score, word, start and end index.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.token_classification("My name is Sarah Jessica Parker but you can call me Jessica")
[
TokenClassificationOutputElement(
entity_group='PER',
score=0.9971321225166321,
word='Sarah Jessica Parker',
start=11,
end=31,
),
TokenClassificationOutputElement(
entity_group='PER',
score=0.9773476123809814,
word='Jessica',
start=52,
end=59,
)
]
```
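Ignoring a label group (a hedged sketch; "O" is the "outside" tag commonly used by NER models, and the returned entities depend on the model):
```py
>>> client.token_classification(
...     "My name is Sarah Jessica Parker but you can call me Jessica",
...     ignore_labels=["O"],  # drop tokens tagged as outside any entity
... )
```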
"""
parameters = {
"aggregation_strategy": aggregation_strategy,
"ignore_labels": ignore_labels,
"stride": stride,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(
**payload,
model=model,
task="token-classification",
)
return TokenClassificationOutputElement.parse_obj_as_list(response)
def translation(
self,
text: str,
*,
model: Optional[str] = None,
src_lang: Optional[str] = None,
tgt_lang: Optional[str] = None,
clean_up_tokenization_spaces: Optional[bool] = None,
truncation: Optional["TranslationTruncationStrategy"] = None,
generate_parameters: Optional[Dict[str, Any]] = None,
) -> TranslationOutput:
"""
Convert text from one language to another.
Check out https://huggingface.co/tasks/translation for more information on how to choose the best model for
your specific use case. Source and target languages usually depend on the model.
However, it is possible to specify source and target languages for certain models. If you are working with one of these models,
you can use `src_lang` and `tgt_lang` arguments to pass the relevant information.
Args:
text (`str`):
A string to be translated.
model (`str`, *optional*):
The model to use for the translation task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended translation model will be used.
Defaults to None.
src_lang (`str`, *optional*):
The source language of the text. Required for models that can translate from multiple languages.
tgt_lang (`str`, *optional*):
Target language to translate to. Required for models that can translate to multiple languages.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether to clean up the potential extra spaces in the text output.
truncation (`"TranslationTruncationStrategy"`, *optional*):
The truncation strategy to use.
generate_parameters (`Dict[str, Any]`, *optional*):
Additional parametrization of the text generation algorithm.
Returns:
[`TranslationOutput`]: The generated translated text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
`ValueError`:
                If only one of the `src_lang` and `tgt_lang` arguments is provided.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.translation("My name is Wolfgang and I live in Berlin")
'Mein Name ist Wolfgang und ich lebe in Berlin.'
>>> client.translation("My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr")
        TranslationOutput(translation_text="Je m'appelle Wolfgang et je vis à Berlin.")
```
Specifying languages:
```py
>>> client.translation("My name is Sarah Jessica Parker but you can call me Jessica", model="facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="fr_XX")
"Mon nom est Sarah Jessica Parker mais vous pouvez m'appeler Jessica"
```
"""
# Throw error if only one of `src_lang` and `tgt_lang` was given
if src_lang is not None and tgt_lang is None:
raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.")
if src_lang is None and tgt_lang is not None:
raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.")
parameters = {
"src_lang": src_lang,
"tgt_lang": tgt_lang,
"clean_up_tokenization_spaces": clean_up_tokenization_spaces,
"truncation": truncation,
"generate_parameters": generate_parameters,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(**payload, model=model, task="translation")
return TranslationOutput.parse_obj_as_list(response)[0]
def visual_question_answering(
self,
image: ContentT,
question: str,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
) -> List[VisualQuestionAnsweringOutputElement]:
"""
Answering open-ended questions based on an image.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
question (`str`):
Question to be answered.
model (`str`, *optional*):
The model to use for the visual question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended visual question answering model will be used.
Defaults to None.
top_k (`int`, *optional*):
                The number of answers to return (will be chosen by order of likelihood). Note that fewer than
                `top_k` answers are returned if there are not enough options available within the context.
Returns:
`List[VisualQuestionAnsweringOutputElement]`: a list of [`VisualQuestionAnsweringOutputElement`] items containing the predicted label and associated probability.
Raises:
`InferenceTimeoutError`:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.visual_question_answering(
... image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
... question="What is the animal doing?"
... )
[
VisualQuestionAnsweringOutputElement(score=0.778609573841095, answer='laying down'),
VisualQuestionAnsweringOutputElement(score=0.6957435607910156, answer='sitting'),
]
```
"""
payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
if top_k is not None:
payload.setdefault("parameters", {})["top_k"] = top_k
response = self.post(json=payload, model=model, task="visual-question-answering")
return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response)
@_deprecate_arguments(
version="0.30.0",
deprecated_args=["labels"],
        custom_message="`labels` has been renamed to `candidate_labels` and will be removed in huggingface_hub>=0.30.0.",
)
def zero_shot_classification(
self,
text: str,
# temporarily keeping it optional for backward compatibility.
candidate_labels: List[str] = None, # type: ignore
*,
multi_label: Optional[bool] = False,
hypothesis_template: Optional[str] = None,
model: Optional[str] = None,
# deprecated argument
labels: List[str] = None, # type: ignore
) -> List[ZeroShotClassificationOutputElement]:
"""
Provide as input a text and a set of candidate labels to classify the input text.
Args:
text (`str`):
The input text to classify.
candidate_labels (`List[str]`):
The set of possible class labels to classify the text into.
labels (`List[str]`, *optional*):
(deprecated) List of strings. Each string is the verbalization of a possible label for the input text.
multi_label (`bool`, *optional*):
Whether multiple candidate labels can be true. If false, the scores are normalized such that the sum of
the label likelihoods for each sequence is 1. If true, the labels are considered independent and
probabilities are normalized for each candidate.
hypothesis_template (`str`, *optional*):
The sentence used in conjunction with `candidate_labels` to attempt the text classification by
replacing the placeholder with the candidate labels.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot classification model will be used.
Returns:
`List[ZeroShotClassificationOutputElement]`: List of [`ZeroShotClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example with `multi_label=False`:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> text = (
... "A new model offers an explanation for how the Galilean satellites formed around the solar system's"
... "largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling"
... " mysteries when he went for a run up a hill in Nice, France."
... )
>>> labels = ["space & cosmos", "scientific discovery", "microbiology", "robots", "archeology"]
>>> client.zero_shot_classification(text, labels)
[
ZeroShotClassificationOutputElement(label='scientific discovery', score=0.7961668968200684),
ZeroShotClassificationOutputElement(label='space & cosmos', score=0.18570658564567566),
ZeroShotClassificationOutputElement(label='microbiology', score=0.00730885099619627),
ZeroShotClassificationOutputElement(label='archeology', score=0.006258360575884581),
ZeroShotClassificationOutputElement(label='robots', score=0.004559356719255447),
]
>>> client.zero_shot_classification(text, labels, multi_label=True)
[
ZeroShotClassificationOutputElement(label='scientific discovery', score=0.9829297661781311),
ZeroShotClassificationOutputElement(label='space & cosmos', score=0.755190908908844),
ZeroShotClassificationOutputElement(label='microbiology', score=0.0005462635890580714),
ZeroShotClassificationOutputElement(label='archeology', score=0.00047131875180639327),
ZeroShotClassificationOutputElement(label='robots', score=0.00030448526376858354),
]
```
Example with `multi_label=True` and a custom `hypothesis_template`:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.zero_shot_classification(
... text="I really like our dinner and I'm very happy. I don't like the weather though.",
... labels=["positive", "negative", "pessimistic", "optimistic"],
... multi_label=True,
... hypothesis_template="This text is {} towards the weather"
... )
[
ZeroShotClassificationOutputElement(label='negative', score=0.9231801629066467),
ZeroShotClassificationOutputElement(label='pessimistic', score=0.8760990500450134),
ZeroShotClassificationOutputElement(label='optimistic', score=0.0008674879791215062),
ZeroShotClassificationOutputElement(label='positive', score=0.0005250611575320363)
]
```
"""
# handle deprecation
if labels is not None:
if candidate_labels is not None:
raise ValueError(
"Cannot specify both `labels` and `candidate_labels`. Use `candidate_labels` instead."
)
candidate_labels = labels
elif candidate_labels is None:
raise ValueError("Must specify `candidate_labels`")
parameters = {
"candidate_labels": candidate_labels,
"multi_label": multi_label,
"hypothesis_template": hypothesis_template,
}
payload = _prepare_payload(text, parameters=parameters)
response = self.post(
**payload,
task="zero-shot-classification",
model=model,
)
output = _bytes_to_dict(response)
return [
ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score})
for label, score in zip(output["labels"], output["scores"])
]
@_deprecate_arguments(
version="0.30.0",
deprecated_args=["labels"],
        custom_message="`labels` has been renamed to `candidate_labels` and will be removed in huggingface_hub>=0.30.0.",
)
def zero_shot_image_classification(
self,
image: ContentT,
# temporarily keeping it optional for backward compatibility.
candidate_labels: List[str] = None, # type: ignore
*,
model: Optional[str] = None,
hypothesis_template: Optional[str] = None,
# deprecated argument
labels: List[str] = None, # type: ignore
) -> List[ZeroShotImageClassificationOutputElement]:
"""
Provide input image and text labels to predict text labels for the image.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
candidate_labels (`List[str]`):
                The candidate labels for this image.
            labels (`List[str]`, *optional*):
                (deprecated) List of possible string labels. There must be at least 2 labels.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot image classification model will be used.
hypothesis_template (`str`, *optional*):
The sentence used in conjunction with `candidate_labels` to attempt the image classification by
replacing the placeholder with the candidate labels.
Returns:
`List[ZeroShotImageClassificationOutputElement]`: List of [`ZeroShotImageClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`HTTPError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.zero_shot_image_classification(
... "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
        ...     candidate_labels=["dog", "cat", "horse"],
... )
[ZeroShotImageClassificationOutputElement(label='dog', score=0.956),...]
```
"""
# handle deprecation
if labels is not None:
if candidate_labels is not None:
raise ValueError(
"Cannot specify both `labels` and `candidate_labels`. Use `candidate_labels` instead."
)
candidate_labels = labels
elif candidate_labels is None:
raise ValueError("Must specify `candidate_labels`")
# Raise ValueError if input is less than 2 labels
if len(candidate_labels) < 2:
raise ValueError("You must specify at least 2 classes to compare.")
parameters = {
"candidate_labels": candidate_labels,
"hypothesis_template": hypothesis_template,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = self.post(
**payload,
model=model,
task="zero-shot-image-classification",
)
return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)
def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
model = model or self.model or self.base_url
# If model is already a URL, ignore `task` and return directly
if model is not None and (model.startswith("http://") or model.startswith("https://")):
return model
        # If no model but task is set => fetch the recommended one for this task
if model is None:
if task is None:
raise ValueError(
"You must specify at least a model (repo_id or URL) or a task, either when instantiating"
" `InferenceClient` or when making a request."
)
model = self.get_recommended_model(task)
logger.info(
f"Using recommended model {model} for task {task}. Note that it is"
f" encouraged to explicitly set `model='{model}'` as the recommended"
" models list might get updated without prior notice."
)
# Compute InferenceAPI url
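        # Illustrative outcomes (the model id below is a placeholder, not an actual recommendation):
        #   task="feature-extraction",  model="my-org/my-model" -> f"{INFERENCE_ENDPOINT}/pipeline/feature-extraction/my-org/my-model"
        #   task="text-classification", model="my-org/my-model" -> f"{INFERENCE_ENDPOINT}/models/my-org/my-model"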
return (
# Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
if task in ("feature-extraction", "sentence-similarity")
# Otherwise, we use the default endpoint
else f"{INFERENCE_ENDPOINT}/models/{model}"
)
@staticmethod
def get_recommended_model(task: str) -> str:
"""
Get the model Hugging Face recommends for the input task.
Args:
task (`str`):
                The Hugging Face task for which to get the recommended model.
All available tasks can be found [here](https://huggingface.co/tasks).
Returns:
`str`: Name of the model recommended for the input task.
Raises:
`ValueError`: If Hugging Face has no recommendation for the input task.
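        Example (illustrative; the returned model id depends on Hugging Face's current recommendations and may change over time):
        ```py
        >>> from huggingface_hub import InferenceClient
        >>> InferenceClient.get_recommended_model("translation")  # returns the model id currently recommended for translation
        ```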
"""
model = _fetch_recommended_models().get(task)
if model is None:
raise ValueError(
f"Task {task} has no recommended model. Please specify a model"
" explicitly. Visit https://huggingface.co/tasks for more info."
)
return model
def get_endpoint_info(self, *, model: Optional[str] = None) -> Dict[str, Any]:
"""
Get information about the deployed endpoint.
This endpoint is only available on endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI).
Endpoints powered by `transformers` return an empty payload.
Args:
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
`Dict[str, Any]`: Information about the endpoint.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> client.get_endpoint_info()
{
'model_id': 'meta-llama/Meta-Llama-3-70B-Instruct',
'model_sha': None,
'model_dtype': 'torch.float16',
'model_device_type': 'cuda',
'model_pipeline_tag': None,
'max_concurrent_requests': 128,
'max_best_of': 2,
'max_stop_sequences': 4,
'max_input_length': 8191,
'max_total_tokens': 8192,
'waiting_served_ratio': 0.3,
'max_batch_total_tokens': 1259392,
'max_waiting_tokens': 20,
'max_batch_size': None,
'validation_workers': 32,
'max_client_batch_size': 4,
'version': '2.0.2',
'sha': 'dccab72549635c7eb5ddb17f43f0b7cdff07c214',
'docker_label': 'sha-dccab72'
}
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if model.startswith(("http://", "https://")):
url = model.rstrip("/") + "/info"
else:
url = f"{INFERENCE_ENDPOINT}/models/{model}/info"
response = get_session().get(url, headers=self.headers)
hf_raise_for_status(response)
return response.json()
def health_check(self, model: Optional[str] = None) -> bool:
"""
Check the health of the deployed endpoint.
Health check is only available with Inference Endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI).
For Inference API, please use [`InferenceClient.get_model_status`] instead.
Args:
model (`str`, *optional*):
URL of the Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
`bool`: True if everything is working fine.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient("https://jzgu0buei5.us-east-1.aws.endpoints.huggingface.cloud")
>>> client.health_check()
True
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if not model.startswith(("http://", "https://")):
raise ValueError(
"Model must be an Inference Endpoint URL. For serverless Inference API, please use `InferenceClient.get_model_status`."
)
url = model.rstrip("/") + "/health"
response = get_session().get(url, headers=self.headers)
return response.status_code == 200
def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
"""
Get the status of a model hosted on the Inference API.
<Tip>
This endpoint is mostly useful when you already know which model you want to use and want to check its
availability. If you want to discover already deployed models, you should rather use [`~InferenceClient.list_deployed_models`].
</Tip>
Args:
model (`str`, *optional*):
                Identifier of the model for which the status will be checked. If model is not provided,
the model associated with this instance of [`InferenceClient`] will be used. Only InferenceAPI service can be checked so the
identifier cannot be a URL.
Returns:
            [`ModelStatus`]: An instance of ModelStatus dataclass, containing information
            about the state of the model: load, state, compute type and framework.
Example:
```py
>>> from huggingface_hub import InferenceClient
>>> client = InferenceClient()
>>> client.get_model_status("meta-llama/Meta-Llama-3-8B-Instruct")
ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference')
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if model.startswith("https://"):
raise NotImplementedError("Model status is only available for Inference API endpoints.")
url = f"{INFERENCE_ENDPOINT}/status/{model}"
response = get_session().get(url, headers=self.headers)
hf_raise_for_status(response)
response_data = response.json()
if "error" in response_data:
raise ValueError(response_data["error"])
return ModelStatus(
loaded=response_data["loaded"],
state=response_data["state"],
compute_type=response_data["compute_type"],
framework=response_data["framework"],
)
@property
def chat(self) -> "ProxyClientChat":
return ProxyClientChat(self) | class_definition | 4,523 | 148,405 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_client.py | null | 139 |
class _ProxyClient:
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
def __init__(self, client: InferenceClient):
self._client = client | class_definition | 148,408 | 148,603 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_client.py | null | 140 |
class ProxyClientChat(_ProxyClient):
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
@property
def completions(self) -> "ProxyClientChatCompletions":
return ProxyClientChatCompletions(self._client) | class_definition | 148,606 | 148,868 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_client.py | null | 141 |
class ProxyClientChatCompletions(_ProxyClient):
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
@property
def create(self):
return self._client.chat_completion | class_definition | 148,871 | 149,095 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_client.py | null | 142 |
class AsyncInferenceClient:
"""
Initialize a new Inference Client.
[`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used
seamlessly with either the (free) Inference API or self-hosted Inference Endpoints.
Args:
model (`str`, `optional`):
The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is
automatically selected for the task.
Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
arguments are mutually exclusive. If using `base_url` for chat completion, the `/chat/completions` suffix
path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
token (`str` or `bool`, *optional*):
Hugging Face token. Will default to the locally saved token if not provided.
Pass `token=False` if you don't want to send your token to the server.
Note: for better compatibility with OpenAI's client, `token` has been aliased as `api_key`. Those 2
arguments are mutually exclusive and have the exact same behavior.
timeout (`float`, `optional`):
The maximum number of seconds to wait for a response from the server. Loading a new model in Inference
API can take up to several minutes. Defaults to None, meaning it will loop until the server is available.
headers (`Dict[str, str]`, `optional`):
Additional headers to send to the server. By default only the authorization and user-agent headers are sent.
Values in this dictionary will override the default values.
cookies (`Dict[str, str]`, `optional`):
Additional cookies to send to the server.
        trust_env (`bool`, `optional`):
Trust environment settings for proxy configuration if the parameter is `True` (`False` by default).
proxies (`Any`, `optional`):
Proxies to use for the request.
base_url (`str`, `optional`):
Base URL to run inference. This is a duplicated argument from `model` to make [`InferenceClient`]
follow the same pattern as `openai.OpenAI` client. Cannot be used if `model` is set. Defaults to None.
api_key (`str`, `optional`):
Token to use for authentication. This is a duplicated argument from `token` to make [`InferenceClient`]
follow the same pattern as `openai.OpenAI` client. Cannot be used if `token` is set. Defaults to None.
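    Example (a minimal usage sketch; the model id below is only an illustration, any Hub model id or Inference Endpoint URL works):
    ```py
    # Must be run in an async context
    >>> from huggingface_hub import AsyncInferenceClient
    >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
    >>> await client.chat_completion([{"role": "user", "content": "Hello!"}], max_tokens=16)
    ```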
"""
def __init__(
self,
model: Optional[str] = None,
*,
token: Union[str, bool, None] = None,
timeout: Optional[float] = None,
headers: Optional[Dict[str, str]] = None,
cookies: Optional[Dict[str, str]] = None,
trust_env: bool = False,
proxies: Optional[Any] = None,
# OpenAI compatibility
base_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
if model is not None and base_url is not None:
raise ValueError(
"Received both `model` and `base_url` arguments. Please provide only one of them."
" `base_url` is an alias for `model` to make the API compatible with OpenAI's client."
" If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url."
" When passing a URL as `model`, the client will not append any suffix path to it."
)
if token is not None and api_key is not None:
raise ValueError(
"Received both `token` and `api_key` arguments. Please provide only one of them."
" `api_key` is an alias for `token` to make the API compatible with OpenAI's client."
" It has the exact same behavior as `token`."
)
self.model: Optional[str] = model
self.token: Union[str, bool, None] = token if token is not None else api_key
self.headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
build_hf_headers(token=self.token) # 'authorization' + 'user-agent'
)
if headers is not None:
self.headers.update(headers)
self.cookies = cookies
self.timeout = timeout
self.trust_env = trust_env
self.proxies = proxies
# OpenAI compatibility
self.base_url = base_url
# Keep track of the sessions to close them properly
self._sessions: Dict["ClientSession", Set["ClientResponse"]] = dict()
def __repr__(self):
return f"<InferenceClient(model='{self.model if self.model else ''}', timeout={self.timeout})>"
@overload
async def post( # type: ignore[misc]
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: Literal[False] = ...,
) -> bytes: ...
@overload
async def post( # type: ignore[misc]
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: Literal[True] = ...,
) -> AsyncIterable[bytes]: ...
@overload
async def post(
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: bool = False,
) -> Union[bytes, AsyncIterable[bytes]]: ...
async def post(
self,
*,
json: Optional[Union[str, Dict, List]] = None,
data: Optional[ContentT] = None,
model: Optional[str] = None,
task: Optional[str] = None,
stream: bool = False,
) -> Union[bytes, AsyncIterable[bytes]]:
"""
Make a POST request to the inference server.
Args:
json (`Union[str, Dict, List]`, *optional*):
The JSON data to send in the request body, specific to each task. Defaults to None.
data (`Union[str, Path, bytes, BinaryIO]`, *optional*):
The content to send in the request body, specific to each task.
It can be raw bytes, a pointer to an opened file, a local file path,
or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed,
`data` will take precedence. At least `json` or `data` must be provided. Defaults to None.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. Will override the model defined at the instance level. Defaults to None.
task (`str`, *optional*):
The task to perform on the inference. All available tasks can be found
[here](https://huggingface.co/tasks). Used only to default to a recommended model if `model` is not
provided. At least `model` or `task` must be provided. Defaults to None.
stream (`bool`, *optional*):
                Whether to iterate over streaming APIs. If `True`, an async iterable of raw bytes is returned instead of the full response.
Returns:
bytes: The raw bytes returned by the server.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
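        Example (a minimal sketch; the task is a placeholder and the exact payload format depends on the task and model):
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> raw_bytes = await client.post(json={"inputs": "Hello world"}, task="text-classification")
        ```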
"""
aiohttp = _import_aiohttp()
url = self._resolve_url(model, task)
if data is not None and json is not None:
warnings.warn("Ignoring `json` as `data` is passed as binary.")
# Set Accept header if relevant
headers = dict()
if task in TASKS_EXPECTING_IMAGES and "Accept" not in headers:
headers["Accept"] = "image/png"
t0 = time.time()
timeout = self.timeout
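        # Retry loop: on HTTP 503 (model loading), wait 1s and retry until `self.timeout` is exceeded (or indefinitely if timeout is None).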
while True:
with _open_as_binary(data) as data_as_binary:
# Do not use context manager as we don't want to close the connection immediately when returning
# a stream
session = self._get_client_session(headers=headers)
try:
response = await session.post(url, json=json, data=data_as_binary, proxy=self.proxies)
response_error_payload = None
if response.status != 200:
try:
response_error_payload = await response.json() # get payload before connection closed
except Exception:
pass
response.raise_for_status()
if stream:
return _async_yield_from(session, response)
else:
content = await response.read()
await session.close()
return content
except asyncio.TimeoutError as error:
await session.close()
# Convert any `TimeoutError` to a `InferenceTimeoutError`
raise InferenceTimeoutError(f"Inference call timed out: {url}") from error # type: ignore
except aiohttp.ClientResponseError as error:
error.response_error_payload = response_error_payload
await session.close()
if response.status == 422 and task is not None:
error.message += f". Make sure '{task}' task is supported by the model."
if response.status == 503:
# If Model is unavailable, either raise a TimeoutError...
if timeout is not None and time.time() - t0 > timeout:
raise InferenceTimeoutError(
f"Model not loaded on the server: {url}. Please retry with a higher timeout"
f" (current: {self.timeout}).",
request=error.request,
response=error.response,
) from error
# ...or wait 1s and retry
logger.info(f"Waiting for model to be loaded on the server: {error}")
if "X-wait-for-model" not in headers and url.startswith(INFERENCE_ENDPOINT):
headers["X-wait-for-model"] = "1"
await asyncio.sleep(1)
if timeout is not None:
timeout = max(self.timeout - (time.time() - t0), 1) # type: ignore
continue
raise error
except Exception:
await session.close()
raise
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
def __del__(self):
if len(self._sessions) > 0:
warnings.warn(
"Deleting 'AsyncInferenceClient' client but some sessions are still open. "
"This can happen if you've stopped streaming data from the server before the stream was complete. "
"To close the client properly, you must call `await client.close()` "
"or use an async context (e.g. `async with AsyncInferenceClient(): ...`."
)
async def close(self):
"""Close all open sessions.
By default, 'aiohttp.ClientSession' objects are closed automatically when a call is completed. However, if you
are streaming data from the server and you stop before the stream is complete, you must call this method to
close the session properly.
Another possibility is to use an async context (e.g. `async with AsyncInferenceClient(): ...`).
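        Example (a minimal sketch of both cleanup options):
        ```py
        # Must be run in an async context
        >>> from huggingface_hub import AsyncInferenceClient
        >>> client = AsyncInferenceClient()
        >>> ...  # stream data and stop early
        >>> await client.close()
        # or let the async context manager close sessions on exit
        >>> async with AsyncInferenceClient() as client:
        ...     ...
        ```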
"""
await asyncio.gather(*[session.close() for session in self._sessions.keys()])
async def audio_classification(
self,
audio: ContentT,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
function_to_apply: Optional["AudioClassificationOutputTransform"] = None,
) -> List[AudioClassificationOutputElement]:
"""
Perform audio classification on the provided audio content.
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an
audio file.
model (`str`, *optional*):
The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub
or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
audio classification will be used.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
function_to_apply (`"AudioClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
Returns:
`List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.audio_classification("audio.flac")
[
AudioClassificationOutputElement(score=0.4976358711719513, label='hap'),
AudioClassificationOutputElement(score=0.3677836060523987, label='neu'),
...
]
```
"""
parameters = {"function_to_apply": function_to_apply, "top_k": top_k}
payload = _prepare_payload(audio, parameters=parameters, expect_binary=True)
response = await self.post(**payload, model=model, task="audio-classification")
return AudioClassificationOutputElement.parse_obj_as_list(response)
async def audio_to_audio(
self,
audio: ContentT,
*,
model: Optional[str] = None,
) -> List[AudioToAudioOutputElement]:
"""
Performs multiple tasks related to audio-to-audio depending on the model (eg: speech enhancement, source separation).
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an
audio file.
model (`str`, *optional*):
The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub
or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
audio_to_audio will be used.
Returns:
`List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing audios label, content-type, and audio content in blob.
Raises:
`InferenceTimeoutError`:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> audio_output = await client.audio_to_audio("audio.flac")
        >>> for i, item in enumerate(audio_output):
        ...     with open(f"output_{i}.flac", "wb") as f:
        ...         f.write(item.blob)
```
"""
response = await self.post(data=audio, model=model, task="audio-to-audio")
audio_output = AudioToAudioOutputElement.parse_obj_as_list(response)
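        # The server returns each blob base64-encoded; decode it so callers get raw audio bytes they can write to disk directly.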
for item in audio_output:
item.blob = base64.b64decode(item.blob)
return audio_output
async def automatic_speech_recognition(
self,
audio: ContentT,
*,
model: Optional[str] = None,
) -> AutomaticSpeechRecognitionOutput:
"""
Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
Args:
audio (Union[str, Path, bytes, BinaryIO]):
The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file.
model (`str`, *optional*):
The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for ASR will be used.
Returns:
[`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
        >>> output = await client.automatic_speech_recognition("hello_world.flac")
        >>> output.text
"hello world"
```
"""
response = await self.post(data=audio, model=model, task="automatic-speech-recognition")
return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response)
@overload
async def chat_completion( # type: ignore
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: Literal[False] = False,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> ChatCompletionOutput: ...
@overload
async def chat_completion( # type: ignore
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: Literal[True] = True,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> AsyncIterable[ChatCompletionStreamOutput]: ...
@overload
async def chat_completion(
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: bool = False,
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: ...
async def chat_completion(
self,
messages: List[Dict],
*,
model: Optional[str] = None,
stream: bool = False,
# Parameters from ChatCompletionInput (handled manually)
frequency_penalty: Optional[float] = None,
logit_bias: Optional[List[float]] = None,
logprobs: Optional[bool] = None,
max_tokens: Optional[int] = None,
n: Optional[int] = None,
presence_penalty: Optional[float] = None,
response_format: Optional[ChatCompletionInputGrammarType] = None,
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stream_options: Optional[ChatCompletionInputStreamOptions] = None,
temperature: Optional[float] = None,
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
tool_prompt: Optional[str] = None,
tools: Optional[List[ChatCompletionInputTool]] = None,
top_logprobs: Optional[int] = None,
top_p: Optional[float] = None,
) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]:
"""
A method for completing conversations using a specified language model.
<Tip>
The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
Inputs and outputs are strictly the same and using either syntax will yield the same results.
Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
for more details about OpenAI's compatibility.
</Tip>
Args:
messages (List of [`ChatCompletionInputMessage`]):
Conversation history consisting of roles and content pairs.
model (`str`, *optional*):
The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used.
See https://huggingface.co/tasks/text-generation for more details.
If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a
custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`].
frequency_penalty (`float`, *optional*):
Penalizes new tokens based on their existing frequency
in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0.
logit_bias (`List[float]`, *optional*):
Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
result in a ban or exclusive selection of the relevant token. Defaults to None.
logprobs (`bool`, *optional*):
Whether to return log probabilities of the output tokens or not. If true, returns the log
probabilities of each output token returned in the content of message.
max_tokens (`int`, *optional*):
Maximum number of tokens allowed in the response. Defaults to 100.
n (`int`, *optional*):
UNUSED.
presence_penalty (`float`, *optional*):
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
text so far, increasing the model's likelihood to talk about new topics.
response_format ([`ChatCompletionInputGrammarType`], *optional*):
Grammar constraints. Can be either a JSONSchema or a regex.
seed (Optional[`int`], *optional*):
Seed for reproducible control flow. Defaults to None.
            stop (`List[str]`, *optional*):
Up to four strings which trigger the end of the response.
Defaults to None.
stream (`bool`, *optional*):
Enable realtime streaming of responses. Defaults to False.
stream_options ([`ChatCompletionInputStreamOptions`], *optional*):
Options for streaming completions.
temperature (`float`, *optional*):
Controls randomness of the generations. Lower values ensure
less random completions. Range: [0, 2]. Defaults to 1.0.
top_logprobs (`int`, *optional*):
An integer between 0 and 5 specifying the number of most likely tokens to return at each token
position, each with an associated log probability. logprobs must be set to true if this parameter is
used.
top_p (`float`, *optional*):
Fraction of the most likely next words to sample from.
Must be between 0 and 1. Defaults to 1.0.
tool_choice ([`ChatCompletionInputToolChoiceClass`] or [`ChatCompletionInputToolChoiceEnum`], *optional*):
The tool to use for the completion. Defaults to "auto".
tool_prompt (`str`, *optional*):
A prompt to be appended before the tools.
tools (List of [`ChatCompletionInputTool`], *optional*):
A list of tools the model may call. Currently, only functions are supported as a tool. Use this to
provide a list of functions the model may generate JSON inputs for.
Returns:
[`ChatCompletionOutput`] or Iterable of [`ChatCompletionStreamOutput`]:
Generated text returned from the server:
- if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default).
- if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`].
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> messages = [{"role": "user", "content": "What is the capital of France?"}]
>>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
>>> await client.chat_completion(messages, max_tokens=100)
ChatCompletionOutput(
choices=[
ChatCompletionOutputComplete(
finish_reason='eos_token',
index=0,
message=ChatCompletionOutputMessage(
role='assistant',
content='The capital of France is Paris.',
name=None,
tool_calls=None
),
logprobs=None
)
],
created=1719907176,
id='',
model='meta-llama/Meta-Llama-3-8B-Instruct',
object='text_completion',
system_fingerprint='2.0.4-sha-f426a33',
usage=ChatCompletionOutputUsage(
completion_tokens=8,
prompt_tokens=17,
total_tokens=25
)
)
```
Example using streaming:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> messages = [{"role": "user", "content": "What is the capital of France?"}]
>>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
>>> async for token in await client.chat_completion(messages, max_tokens=10, stream=True):
... print(token)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content='The', role='assistant'), index=0, finish_reason=None)], created=1710498504)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' capital', role='assistant'), index=0, finish_reason=None)], created=1710498504)
(...)
ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' may', role='assistant'), index=0, finish_reason=None)], created=1710498504)
```
Example using OpenAI's syntax:
```py
# Must be run in an async context
# instead of `from openai import OpenAI`
from huggingface_hub import AsyncInferenceClient
# instead of `client = OpenAI(...)`
client = AsyncInferenceClient(
base_url=...,
api_key=...,
)
output = await client.chat.completions.create(
model="meta-llama/Meta-Llama-3-8B-Instruct",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Count to 10"},
],
stream=True,
max_tokens=1024,
)
        async for chunk in output:
print(chunk.choices[0].delta.content)
```
Example using Image + Text as input:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
# provide a remote URL
>>> image_url ="https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
# or a base64-encoded image
        >>> import base64
        >>> image_path = "/path/to/image.jpeg"
>>> with open(image_path, "rb") as f:
... base64_image = base64.b64encode(f.read()).decode("utf-8")
>>> image_url = f"data:image/jpeg;base64,{base64_image}"
>>> client = AsyncInferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
>>> output = await client.chat.completions.create(
... messages=[
... {
... "role": "user",
... "content": [
... {
... "type": "image_url",
... "image_url": {"url": image_url},
... },
... {
... "type": "text",
... "text": "Describe this image in one sentence.",
... },
... ],
... },
... ],
... )
        >>> output.choices[0].message.content
        'The image depicts the iconic Statue of Liberty situated in New York Harbor, New York, on a clear day.'
```
Example using tools:
```py
# Must be run in an async context
>>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> messages = [
... {
... "role": "system",
... "content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.",
... },
... {
... "role": "user",
... "content": "What's the weather like the next 3 days in San Francisco, CA?",
... },
... ]
>>> tools = [
... {
... "type": "function",
... "function": {
... "name": "get_current_weather",
... "description": "Get the current weather",
... "parameters": {
... "type": "object",
... "properties": {
... "location": {
... "type": "string",
... "description": "The city and state, e.g. San Francisco, CA",
... },
... "format": {
... "type": "string",
... "enum": ["celsius", "fahrenheit"],
... "description": "The temperature unit to use. Infer this from the users location.",
... },
... },
... "required": ["location", "format"],
... },
... },
... },
... {
... "type": "function",
... "function": {
... "name": "get_n_day_weather_forecast",
... "description": "Get an N-day weather forecast",
... "parameters": {
... "type": "object",
... "properties": {
... "location": {
... "type": "string",
... "description": "The city and state, e.g. San Francisco, CA",
... },
... "format": {
... "type": "string",
... "enum": ["celsius", "fahrenheit"],
... "description": "The temperature unit to use. Infer this from the users location.",
... },
... "num_days": {
... "type": "integer",
... "description": "The number of days to forecast",
... },
... },
... "required": ["location", "format", "num_days"],
... },
... },
... },
... ]
>>> response = await client.chat_completion(
... model="meta-llama/Meta-Llama-3-70B-Instruct",
... messages=messages,
... tools=tools,
... tool_choice="auto",
... max_tokens=500,
... )
>>> response.choices[0].message.tool_calls[0].function
ChatCompletionOutputFunctionDefinition(
arguments={
'location': 'San Francisco, CA',
'format': 'fahrenheit',
'num_days': 3
},
name='get_n_day_weather_forecast',
description=None
)
```
Example using response_format:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> messages = [
... {
... "role": "user",
        ...         "content": "I saw a puppy, a cat and a raccoon during my bike ride in the park. What did I see and when?",
... },
... ]
>>> response_format = {
... "type": "json",
... "value": {
... "properties": {
... "location": {"type": "string"},
... "activity": {"type": "string"},
... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
... "animals": {"type": "array", "items": {"type": "string"}},
... },
... "required": ["location", "activity", "animals_seen", "animals"],
... },
... }
>>> response = await client.chat_completion(
... messages=messages,
... response_format=response_format,
... max_tokens=500,
        ... )
>>> response.choices[0].message.content
'{\n\n"activity": "bike ride",\n"animals": ["puppy", "cat", "raccoon"],\n"animals_seen": 3,\n"location": "park"}'
```
"""
model_url = self._resolve_chat_completion_url(model)
# `model` is sent in the payload. Not used by the server but can be useful for debugging/routing.
        # If it's an ID on the Hub => use it. Otherwise (URL), fall back to a dummy value ("tgi").
model_id = model or self.model or "tgi"
if model_id.startswith(("http://", "https://")):
model_id = "tgi" # dummy value
payload = dict(
model=model_id,
messages=messages,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
logprobs=logprobs,
max_tokens=max_tokens,
n=n,
presence_penalty=presence_penalty,
response_format=response_format,
seed=seed,
stop=stop,
temperature=temperature,
tool_choice=tool_choice,
tool_prompt=tool_prompt,
tools=tools,
top_logprobs=top_logprobs,
top_p=top_p,
stream=stream,
stream_options=stream_options,
)
payload = {key: value for key, value in payload.items() if value is not None}
data = await self.post(model=model_url, json=payload, stream=stream)
if stream:
return _async_stream_chat_completion_response(data) # type: ignore[arg-type]
return ChatCompletionOutput.parse_obj_as_instance(data) # type: ignore[arg-type]
def _resolve_chat_completion_url(self, model: Optional[str] = None) -> str:
# Since `chat_completion(..., model=xxx)` is also a payload parameter for the server, we need to handle 'model' differently.
# `self.base_url` and `self.model` takes precedence over 'model' argument only in `chat_completion`.
model_id_or_url = self.base_url or self.model or model or self.get_recommended_model("text-generation")
# Resolve URL if it's a model ID
model_url = (
model_id_or_url
if model_id_or_url.startswith(("http://", "https://"))
else self._resolve_url(model_id_or_url, task="text-generation")
)
# Strip trailing /
model_url = model_url.rstrip("/")
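        # Illustrative mapping produced by the logic below (endpoint URL is a placeholder):
        #   "https://my-endpoint.cloud"                     -> "https://my-endpoint.cloud/v1/chat/completions"
        #   "https://my-endpoint.cloud/v1"                  -> "https://my-endpoint.cloud/v1/chat/completions"
        #   "https://my-endpoint.cloud/v1/chat/completions" -> unchanged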
# Append /chat/completions if not already present
if model_url.endswith("/v1"):
model_url += "/chat/completions"
# Append /v1/chat/completions if not already present
if not model_url.endswith("/chat/completions"):
model_url += "/v1/chat/completions"
return model_url
async def document_question_answering(
self,
image: ContentT,
question: str,
*,
model: Optional[str] = None,
doc_stride: Optional[int] = None,
handle_impossible_answer: Optional[bool] = None,
lang: Optional[str] = None,
max_answer_len: Optional[int] = None,
max_question_len: Optional[int] = None,
max_seq_len: Optional[int] = None,
top_k: Optional[int] = None,
word_boxes: Optional[List[Union[List[float], str]]] = None,
) -> List[DocumentQuestionAnsweringOutputElement]:
"""
Answer questions on document images.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
question (`str`):
Question to be answered.
model (`str`, *optional*):
The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used.
Defaults to None.
doc_stride (`int`, *optional*):
If the words in the document are too long to fit with the question for the model, it will be split in
several chunks with some overlap. This argument controls the size of that overlap.
handle_impossible_answer (`bool`, *optional*):
Whether to accept impossible as an answer
lang (`str`, *optional*):
                Language to use while running OCR. Defaults to English.
max_answer_len (`int`, *optional*):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_question_len (`int`, *optional*):
The maximum length of the question after tokenization. It will be truncated if needed.
max_seq_len (`int`, *optional*):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using doc_stride as overlap) if needed.
top_k (`int`, *optional*):
The number of answers to return (will be chosen by order of likelihood). Can return less than top_k
answers if there are not enough options available within the context.
            word_boxes (`List[Union[List[float], str]]`, *optional*):
A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR
step and use the provided bounding boxes instead.
Returns:
`List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?")
[DocumentQuestionAnsweringOutputElement(answer='us-001', end=16, score=0.9999666213989258, start=16)]
```
"""
inputs: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
parameters = {
"doc_stride": doc_stride,
"handle_impossible_answer": handle_impossible_answer,
"lang": lang,
"max_answer_len": max_answer_len,
"max_question_len": max_question_len,
"max_seq_len": max_seq_len,
"top_k": top_k,
"word_boxes": word_boxes,
}
payload = _prepare_payload(inputs, parameters=parameters)
response = await self.post(**payload, model=model, task="document-question-answering")
return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response)
async def feature_extraction(
self,
text: str,
*,
normalize: Optional[bool] = None,
prompt_name: Optional[str] = None,
truncate: Optional[bool] = None,
truncation_direction: Optional[Literal["Left", "Right"]] = None,
model: Optional[str] = None,
) -> "np.ndarray":
"""
Generate embeddings for a given text.
Args:
text (`str`):
The text to embed.
model (`str`, *optional*):
The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
Defaults to None.
normalize (`bool`, *optional*):
Whether to normalize the embeddings or not.
Only available on server powered by Text-Embedding-Inference.
prompt_name (`str`, *optional*):
                The name of the prompt that should be used for encoding. If not set, no prompt will be applied.
Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",...},
then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?"
because the prompt text will be prepended before any text to encode.
truncate (`bool`, *optional*):
Whether to truncate the input or not.
Only available on servers powered by Text-Embedding-Inference.
truncation_direction (`Literal["Left", "Right"]`, *optional*):
Which side of the input should be truncated when `truncate=True` is passed.
Returns:
`np.ndarray`: The embedding representing the input text as a float32 numpy array.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.feature_extraction("Hi, who are you?")
array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ],
[-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ],
...,
[ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32)
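# Illustrative only: ask the server to truncate inputs that exceed the model's maximum length
# (only supported on Text-Embedding-Inference backends)
>>> await client.feature_extraction("A very long text to embed", truncate=True, truncation_direction="Right")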
```
"""
parameters = {
"normalize": normalize,
"prompt_name": prompt_name,
"truncate": truncate,
"truncation_direction": truncation_direction,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(**payload, model=model, task="feature-extraction")
np = _import_numpy()
return np.array(_bytes_to_dict(response), dtype="float32")
async def fill_mask(
self,
text: str,
*,
model: Optional[str] = None,
targets: Optional[List[str]] = None,
top_k: Optional[int] = None,
) -> List[FillMaskOutputElement]:
"""
Fill in a hole with a missing word (token to be precise).
Args:
text (`str`):
A string to be filled in; it must contain the [MASK] token (check the model card for the exact name of the mask token).
model (`str`, *optional*):
The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used.
targets (`List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocabulary. If the provided targets are not in the model vocab, they will be tokenized and the first
resulting token will be used (with a warning, and that might be slower).
top_k (`int`, *optional*):
When passed, overrides the number of predictions to return.
Returns:
`List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated
probability, token reference, and completed text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.fill_mask("The goal of life is <mask>.")
[
FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'),
FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.')
]
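# Illustrative only: limit the number of predictions returned
>>> await client.fill_mask("The goal of life is <mask>.", top_k=1)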
```
"""
parameters = {"targets": targets, "top_k": top_k}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(**payload, model=model, task="fill-mask")
return FillMaskOutputElement.parse_obj_as_list(response)
async def image_classification(
self,
image: ContentT,
*,
model: Optional[str] = None,
function_to_apply: Optional["ImageClassificationOutputTransform"] = None,
top_k: Optional[int] = None,
) -> List[ImageClassificationOutputElement]:
"""
Perform image classification on the given image using the specified model.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to classify. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used.
function_to_apply (`"ImageClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
Returns:
`List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
[ImageClassificationOutputElement(label='Blenheim spaniel', score=0.9779096841812134), ...]
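# Illustrative only: keep only the 3 most probable classes
>>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg", top_k=3)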
```
"""
parameters = {"function_to_apply": function_to_apply, "top_k": top_k}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = await self.post(**payload, model=model, task="image-classification")
return ImageClassificationOutputElement.parse_obj_as_list(response)
async def image_segmentation(
self,
image: ContentT,
*,
model: Optional[str] = None,
mask_threshold: Optional[float] = None,
overlap_mask_area_threshold: Optional[float] = None,
subtask: Optional["ImageSegmentationSubtask"] = None,
threshold: Optional[float] = None,
) -> List[ImageSegmentationOutputElement]:
"""
Perform image segmentation on the given image using the specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to segment. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
mask_threshold (`float`, *optional*):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*):
Mask overlap threshold to eliminate small, disconnected segments.
subtask (`"ImageSegmentationSubtask"`, *optional*):
Segmentation task to be performed, depending on model capabilities.
threshold (`float`, *optional*):
Probability threshold to filter out predicted masks.
Returns:
`List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.image_segmentation("cat.jpg")
[ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=<PIL.PngImagePlugin.PngImageFile image mode=L size=400x300 at 0x7FDD2B129CC0>), ...]
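# Illustrative only: filter out low-confidence masks
>>> await client.image_segmentation("cat.jpg", threshold=0.9)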
```
"""
parameters = {
"mask_threshold": mask_threshold,
"overlap_mask_area_threshold": overlap_mask_area_threshold,
"subtask": subtask,
"threshold": threshold,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = await self.post(**payload, model=model, task="image-segmentation")
output = ImageSegmentationOutputElement.parse_obj_as_list(response)
for item in output:
item.mask = _b64_to_image(item.mask) # type: ignore [assignment]
return output
async def image_to_image(
self,
image: ContentT,
prompt: Optional[str] = None,
*,
negative_prompt: Optional[List[str]] = None,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
model: Optional[str] = None,
target_size: Optional[ImageToImageTargetSize] = None,
**kwargs,
) -> "Image":
"""
Perform image-to-image translation using a specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for translation. It can be raw bytes, an image file, or a URL to an online image.
prompt (`str`, *optional*):
The text prompt to guide the image generation.
negative_prompt (`List[str]`, *optional*):
One or several prompts to guide what NOT to include in the image generation.
num_inference_steps (`int`, *optional*):
For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher
quality image at the expense of slower inference.
guidance_scale (`float`, *optional*):
For diffusion models. A higher guidance scale value encourages the model to generate images closely
linked to the text prompt at the expense of lower image quality.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
target_size (`ImageToImageTargetSize`, *optional*):
The size in pixels of the output image.
Returns:
`Image`: The translated image.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> image = await client.image_to_image("cat.jpg", prompt="turn the cat into a tiger")
>>> image.save("tiger.jpg")
```
"""
parameters = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"target_size": target_size,
"num_inference_steps": num_inference_steps,
"guidance_scale": guidance_scale,
**kwargs,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = await self.post(**payload, model=model, task="image-to-image")
return _bytes_to_image(response)
async def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput:
"""
Takes an input image and returns text.
Models can have very different outputs depending on your use case (image captioning, optical character recognition
(OCR), Pix2Struct, etc). Please have a look at the model card to learn more about a model's specificities.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
[`ImageToTextOutput`]: The generated text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.image_to_text("cat.jpg")
'a cat standing in a grassy field '
>>> await client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
'a dog laying on the grass next to a flower pot '
```
"""
response = await self.post(data=image, model=model, task="image-to-text")
output = ImageToTextOutput.parse_obj(response)
return output[0] if isinstance(output, list) else output
async def list_deployed_models(
self, frameworks: Union[None, str, Literal["all"], List[str]] = None
) -> Dict[str, List[str]]:
"""
List models deployed on the Serverless Inference API service.
This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that
are supported and account for 95% of the hosted models. However, if you want a complete list of models, you can
specify `frameworks="all"` as input. Alternatively, if you know beforehand which framework you are interested
in, you can also restrict the search to this one (e.g. `frameworks="text-generation-inference"`). The more
frameworks are checked, the longer it will take.
<Tip warning={true}>
This endpoint method does not return a live list of all models available for the Serverless Inference API service.
It searches over a cached list of models that were recently available and the list may not be up to date.
If you want to know the live status of a specific model, use [`~InferenceClient.get_model_status`].
</Tip>
<Tip>
This endpoint method is mostly useful for discoverability. If you already know which model you want to use and want to
check its availability, you can directly use [`~InferenceClient.get_model_status`].
</Tip>
Args:
frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
"all", all available frameworks will be tested. It is also possible to provide a single framework or a
custom set of frameworks to check.
Returns:
`Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
# Discover zero-shot-classification models currently deployed
>>> models = await client.list_deployed_models()
>>> models["zero-shot-classification"]
['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...]
# List from only 1 framework
>>> await client.list_deployed_models("text-generation-inference")
{'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...}
```
"""
# Resolve which frameworks to check
if frameworks is None:
frameworks = MAIN_INFERENCE_API_FRAMEWORKS
elif frameworks == "all":
frameworks = ALL_INFERENCE_API_FRAMEWORKS
elif isinstance(frameworks, str):
frameworks = [frameworks]
frameworks = list(set(frameworks))
# Fetch them iteratively
models_by_task: Dict[str, List[str]] = {}
def _unpack_response(framework: str, items: List[Dict]) -> None:
for model in items:
if framework == "sentence-transformers":
# Models running with the `sentence-transformers` framework can work with both tasks even if not
# branded as such in the API response
models_by_task.setdefault("feature-extraction", []).append(model["model_id"])
models_by_task.setdefault("sentence-similarity", []).append(model["model_id"])
else:
models_by_task.setdefault(model["task"], []).append(model["model_id"])
async def _fetch_framework(framework: str) -> None:
async with self._get_client_session() as client:
response = await client.get(f"{INFERENCE_ENDPOINT}/framework/{framework}", proxy=self.proxies)
response.raise_for_status()
_unpack_response(framework, await response.json())
import asyncio
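# Query all requested frameworks concurrently and merge the results into `models_by_task`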
await asyncio.gather(*[_fetch_framework(framework) for framework in frameworks])
# Sort alphabetically for discoverability and return
for task, models in models_by_task.items():
models_by_task[task] = sorted(set(models), key=lambda x: x.lower())
return models_by_task
async def object_detection(
self, image: ContentT, *, model: Optional[str] = None, threshold: Optional[float] = None
) -> List[ObjectDetectionOutputElement]:
"""
Perform object detection on the given image using the specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The image to detect objects on. It can be raw bytes, an image file, or a URL to an online image.
model (`str`, *optional*):
The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a
deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used.
threshold (`float`, *optional*):
The probability necessary to make a prediction.
Returns:
`List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
`ValueError`:
If the request output is not a List.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.object_detection("people.jpg")
[ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...]
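# Illustrative only: keep only detections above a confidence threshold
>>> await client.object_detection("people.jpg", threshold=0.8)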
```
"""
parameters = {
"threshold": threshold,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = await self.post(**payload, model=model, task="object-detection")
return ObjectDetectionOutputElement.parse_obj_as_list(response)
async def question_answering(
self,
question: str,
context: str,
*,
model: Optional[str] = None,
align_to_words: Optional[bool] = None,
doc_stride: Optional[int] = None,
handle_impossible_answer: Optional[bool] = None,
max_answer_len: Optional[int] = None,
max_question_len: Optional[int] = None,
max_seq_len: Optional[int] = None,
top_k: Optional[int] = None,
) -> Union[QuestionAnsweringOutputElement, List[QuestionAnsweringOutputElement]]:
"""
Retrieve the answer to a question from a given text.
Args:
question (`str`):
Question to be answered.
context (`str`):
The context of the question.
model (`str`, *optional*):
The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint.
align_to_words (`bool`, *optional*):
Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt
on non-space-separated languages (like Japanese or Chinese).
doc_stride (`int`, *optional*):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
handle_impossible_answer (`bool`, *optional*):
Whether to accept impossible as an answer.
max_answer_len (`int`, *optional*):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_question_len (`int`, *optional*):
The maximum length of the question after tokenization. It will be truncated if needed.
max_seq_len (`int`, *optional*):
The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
model. The context will be split in several chunks (using doc_stride as overlap) if needed.
top_k (`int`, *optional*):
The number of answers to return (will be chosen by order of likelihood). Note that fewer than
top_k answers are returned if there are not enough options available within the context.
Returns:
Union[`QuestionAnsweringOutputElement`, List[`QuestionAnsweringOutputElement`]]:
When top_k is 1 or not provided, it returns a single `QuestionAnsweringOutputElement`.
When top_k is greater than 1, it returns a list of `QuestionAnsweringOutputElement`.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.")
QuestionAnsweringOutputElement(answer='Clara', end=16, score=0.9326565265655518, start=11)
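# Illustrative only: request several candidate answers (a list is returned when top_k > 1)
>>> await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.", top_k=2)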
```
"""
parameters = {
"align_to_words": align_to_words,
"doc_stride": doc_stride,
"handle_impossible_answer": handle_impossible_answer,
"max_answer_len": max_answer_len,
"max_question_len": max_question_len,
"max_seq_len": max_seq_len,
"top_k": top_k,
}
inputs: Dict[str, Any] = {"question": question, "context": context}
payload = _prepare_payload(inputs, parameters=parameters)
response = await self.post(
**payload,
model=model,
task="question-answering",
)
# Parse the response as a single `QuestionAnsweringOutputElement` when top_k is 1 or not provided, or a list of `QuestionAnsweringOutputElement` to ensure backward compatibility.
output = QuestionAnsweringOutputElement.parse_obj(response)
return output
async def sentence_similarity(
self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
) -> List[float]:
"""
Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
Args:
sentence (`str`):
The main sentence to compare to others.
other_sentences (`List[str]`):
The list of sentences to compare to.
model (`str`, *optional*):
The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used.
Defaults to None.
Returns:
`List[float]`: The similarity scores between the source sentence and each of the other sentences.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.sentence_similarity(
... "Machine learning is so easy.",
... other_sentences=[
... "Deep learning is so straightforward.",
... "This is so difficult, like rocket science.",
... "I can't believe how much I struggled with this.",
... ],
... )
[0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
```
"""
response = await self.post(
json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}},
model=model,
task="sentence-similarity",
)
return _bytes_to_list(response)
@_deprecate_arguments(
version="0.29",
deprecated_args=["parameters"],
custom_message=(
"The `parameters` argument is deprecated and will be removed in a future version. "
"Provide individual parameters instead: `clean_up_tokenization_spaces`, `generate_parameters`, and `truncation`."
),
)
async def summarization(
self,
text: str,
*,
parameters: Optional[Dict[str, Any]] = None,
model: Optional[str] = None,
clean_up_tokenization_spaces: Optional[bool] = None,
generate_parameters: Optional[Dict[str, Any]] = None,
truncation: Optional["SummarizationTruncationStrategy"] = None,
) -> SummarizationOutput:
"""
Generate a summary of a given text using a specified model.
Args:
text (`str`):
The input text to summarize.
parameters (`Dict[str, Any]`, *optional*):
Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task)
for more details.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended model for summarization will be used.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether to clean up the potential extra spaces in the text output.
generate_parameters (`Dict[str, Any]`, *optional*):
Additional parametrization of the text generation algorithm.
truncation (`"SummarizationTruncationStrategy"`, *optional*):
The truncation strategy to use.
Returns:
[`SummarizationOutput`]: The generated summary text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.summarization("The Eiffel tower...")
SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....")
```
"""
if parameters is None:
parameters = {
"clean_up_tokenization_spaces": clean_up_tokenization_spaces,
"generate_parameters": generate_parameters,
"truncation": truncation,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(**payload, model=model, task="summarization")
return SummarizationOutput.parse_obj_as_list(response)[0]
async def table_question_answering(
self,
table: Dict[str, Any],
query: str,
*,
model: Optional[str] = None,
padding: Optional["Padding"] = None,
sequential: Optional[bool] = None,
truncation: Optional[bool] = None,
) -> TableQuestionAnsweringOutputElement:
"""
Retrieve the answer to a question from information given in a table.
Args:
table (`Dict[str, Any]`):
A table of data represented as a dict of lists where keys are the column headers and values are the
column entries. All lists must have the same size.
query (`str`):
The query in plain text that you want to ask the table.
model (`str`, *optional*):
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
Hub or a URL to a deployed Inference Endpoint.
padding (`"Padding"`, *optional*):
Activates and controls padding.
sequential (`bool`, *optional*):
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
inference to be done sequentially to extract relations within sequences, given their conversational
nature.
truncation (`bool`, *optional*):
Activates and controls truncation.
Returns:
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> query = "How many stars does the transformers repository have?"
>>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]}
>>> await client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq")
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
```
"""
parameters = {
"padding": padding,
"sequential": sequential,
"truncation": truncation,
}
inputs = {
"query": query,
"table": table,
}
payload = _prepare_payload(inputs, parameters=parameters)
response = await self.post(
**payload,
model=model,
task="table-question-answering",
)
return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response)
async def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]:
"""
Classify a target category (a group) based on a set of attributes.
Args:
table (`Dict[str, Any]`):
Set of attributes to classify.
model (`str`, *optional*):
The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used.
Defaults to None.
Returns:
`List`: a list of labels, one per row in the initial table.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> table = {
... "fixed_acidity": ["7.4", "7.8", "10.3"],
... "volatile_acidity": ["0.7", "0.88", "0.32"],
... "citric_acid": ["0", "0", "0.45"],
... "residual_sugar": ["1.9", "2.6", "6.4"],
... "chlorides": ["0.076", "0.098", "0.073"],
... "free_sulfur_dioxide": ["11", "25", "5"],
... "total_sulfur_dioxide": ["34", "67", "13"],
... "density": ["0.9978", "0.9968", "0.9976"],
... "pH": ["3.51", "3.2", "3.23"],
... "sulphates": ["0.56", "0.68", "0.82"],
... "alcohol": ["9.4", "9.8", "12.6"],
... }
>>> await client.tabular_classification(table=table, model="julien-c/wine-quality")
["5", "5", "5"]
```
"""
response = await self.post(
json={"table": table},
model=model,
task="tabular-classification",
)
return _bytes_to_list(response)
async def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]:
"""
Predict a numerical target value given a set of attributes/features in a table.
Args:
table (`Dict[str, Any]`):
Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical.
model (`str`, *optional*):
The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used.
Defaults to None.
Returns:
`List`: a list of predicted numerical target values.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> table = {
... "Height": ["11.52", "12.48", "12.3778"],
... "Length1": ["23.2", "24", "23.9"],
... "Length2": ["25.4", "26.3", "26.5"],
... "Length3": ["30", "31.2", "31.1"],
... "Species": ["Bream", "Bream", "Bream"],
... "Width": ["4.02", "4.3056", "4.6961"],
... }
>>> await client.tabular_regression(table, model="scikit-learn/Fish-Weight")
[110, 120, 130]
```
"""
response = await self.post(json={"table": table}, model=model, task="tabular-regression")
return _bytes_to_list(response)
async def text_classification(
self,
text: str,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
function_to_apply: Optional["TextClassificationOutputTransform"] = None,
) -> List[TextClassificationOutputElement]:
"""
Perform text classification (e.g. sentiment-analysis) on the given text.
Args:
text (`str`):
A string to be classified.
model (`str`, *optional*):
The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used.
Defaults to None.
top_k (`int`, *optional*):
When specified, limits the output to the top K most probable classes.
function_to_apply (`"TextClassificationOutputTransform"`, *optional*):
The function to apply to the model outputs in order to retrieve the scores.
Returns:
`List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.text_classification("I like you")
[
TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314),
TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069),
]
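# Illustrative only: return only the most probable class
>>> await client.text_classification("I like you", top_k=1)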
```
"""
parameters = {
"function_to_apply": function_to_apply,
"top_k": top_k,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(
**payload,
model=model,
task="text-classification",
)
return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value]
@overload
async def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[False] = ...,
stream: Literal[False] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> str: ...
@overload
async def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: Literal[False] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> TextGenerationOutput: ...
@overload
async def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[False] = ...,
stream: Literal[True] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> AsyncIterable[str]: ...
@overload
async def text_generation( # type: ignore
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: Literal[True] = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> AsyncIterable[TextGenerationStreamOutput]: ...
@overload
async def text_generation(
self,
prompt: str,
*,
details: Literal[True] = ...,
stream: bool = ...,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Union[TextGenerationOutput, AsyncIterable[TextGenerationStreamOutput]]: ...
async def text_generation(
self,
prompt: str,
*,
details: bool = False,
stream: bool = False,
model: Optional[str] = None,
# Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
adapter_id: Optional[str] = None,
best_of: Optional[int] = None,
decoder_input_details: Optional[bool] = None,
do_sample: Optional[bool] = False, # Manual default value
frequency_penalty: Optional[float] = None,
grammar: Optional[TextGenerationInputGrammarType] = None,
max_new_tokens: Optional[int] = None,
repetition_penalty: Optional[float] = None,
return_full_text: Optional[bool] = False, # Manual default value
seed: Optional[int] = None,
stop: Optional[List[str]] = None,
stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_n_tokens: Optional[int] = None,
top_p: Optional[float] = None,
truncate: Optional[int] = None,
typical_p: Optional[float] = None,
watermark: Optional[bool] = None,
) -> Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
"""
Given a prompt, generate the following text.
API endpoint is supposed to run with the `text-generation-inference` backend (TGI). This backend is the
go-to solution to run large language models at scale. However, for some smaller models (e.g. "gpt2") the
default `transformers` + `api-inference` solution is still in use. Both approaches have very similar APIs, but
not exactly the same. This method is compatible with both approaches but some parameters are only available for
`text-generation-inference`. If some parameters are ignored, a warning message is triggered but the process
continues correctly.
To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference.
<Tip>
If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
</Tip>
Args:
prompt (`str`):
Input text.
details (`bool`, *optional*):
By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
probabilities, seed, finish reason, etc.). Only available for models running with the
`text-generation-inference` backend.
stream (`bool`, *optional*):
By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
tokens to be returned. Only available for models running with the `text-generation-inference`
backend.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
adapter_id (`str`, *optional*):
LoRA adapter id.
best_of (`int`, *optional*):
Generate best_of sequences and return the one with the highest token logprobs.
decoder_input_details (`bool`, *optional*):
Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
into account. Defaults to `False`.
do_sample (`bool`, *optional*):
Activate logits sampling.
frequency_penalty (`float`, *optional*):
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in
the text so far, decreasing the model's likelihood to repeat the same line verbatim.
grammar ([`TextGenerationInputGrammarType`], *optional*):
Grammar constraints. Can be either a JSONSchema or a regex.
max_new_tokens (`int`, *optional*):
Maximum number of generated tokens. Defaults to 100.
repetition_penalty (`float`, *optional*):
The parameter for repetition penalty. 1.0 means no penalty. See [this
paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
return_full_text (`bool`, *optional*):
Whether to prepend the prompt to the generated text.
seed (`int`, *optional*):
Random sampling seed.
stop (`List[str]`, *optional*):
Stop generating tokens if a member of `stop` is generated.
stop_sequences (`List[str]`, *optional*):
Deprecated argument. Use `stop` instead.
temperature (`float`, *optional*):
The value used to module the logits distribution.
top_n_tokens (`int`, *optional*):
Return information about the `top_n_tokens` most likely tokens at each generation step, instead of
just the sampled token.
top_k (`int`, *optional*):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`, *optional*):
If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
higher are kept for generation.
truncate (`int`, *optional*):
Truncate input tokens to the given size.
typical_p (`float`, *optional*):
Typical Decoding mass.
See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
watermark (`bool`, *optional*):
Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
Returns:
`Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]`:
Generated text returned from the server:
- if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
- if `stream=True` and `details=False`, the generated text is returned token by token as an `AsyncIterable[str]`
- if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.TextGenerationOutput`]
- if `details=True` and `stream=True`, the generated text is returned token by token as an `AsyncIterable` of [`~huggingface_hub.TextGenerationStreamOutput`]
Raises:
`ValidationError`:
If input values are not valid. No HTTP call is made to the server.
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
# Case 1: generate text
>>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
'100% open source and built to be easy to use.'
# Case 2: iterate over the generated tokens. Useful for large generation.
>>> async for token in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
... print(token)
100
%
open
source
and
built
to
be
easy
to
use
.
# Case 3: get more details about the generation process.
>>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
TextGenerationOutput(
generated_text='100% open source and built to be easy to use.',
details=TextGenerationDetails(
finish_reason='length',
generated_tokens=12,
seed=None,
prefill=[
TextGenerationPrefillOutputToken(id=487, text='The', logprob=None),
TextGenerationPrefillOutputToken(id=53789, text=' hugging', logprob=-13.171875),
(...)
TextGenerationPrefillOutputToken(id=204, text=' ', logprob=-7.0390625)
],
tokens=[
TokenElement(id=1425, text='100', logprob=-1.0175781, special=False),
TokenElement(id=16, text='%', logprob=-0.0463562, special=False),
(...)
TokenElement(id=25, text='.', logprob=-0.5703125, special=False)
],
best_of_sequences=None
)
)
# Case 4: iterate over the generated tokens with more details.
# Last object is more complete, containing the full generated text and the finish reason.
>>> async for details in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True):
... print(details)
...
TextGenerationStreamOutput(token=TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None)
TextGenerationStreamOutput(token=TokenElement(
id=25,
text='.',
logprob=-0.5703125,
special=False),
generated_text='100% open source and built to be easy to use.',
details=TextGenerationStreamOutputStreamDetails(finish_reason='length', generated_tokens=12, seed=None)
)
# Case 5: generate constrained output using grammar
>>> response = await client.text_generation(
... prompt="I saw a puppy a cat and a raccoon during my bike ride in the park",
... model="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
... max_new_tokens=100,
... repetition_penalty=1.3,
... grammar={
... "type": "json",
... "value": {
... "properties": {
... "location": {"type": "string"},
... "activity": {"type": "string"},
... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
... "animals": {"type": "array", "items": {"type": "string"}},
... },
... "required": ["location", "activity", "animals_seen", "animals"],
... },
... },
... )
>>> import json
>>> json.loads(response)
{
"activity": "bike riding",
"animals": ["puppy", "cat", "raccoon"],
"animals_seen": 3,
"location": "park"
}
```
"""
if decoder_input_details and not details:
warnings.warn(
"`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that"
" the output from the server will be truncated."
)
decoder_input_details = False
if stop_sequences is not None:
warnings.warn(
"`stop_sequences` is a deprecated argument for `text_generation` task"
" and will be removed in version '0.28.0'. Use `stop` instead.",
FutureWarning,
)
if stop is None:
stop = stop_sequences # use deprecated arg if provided
# Build payload
parameters = {
"adapter_id": adapter_id,
"best_of": best_of,
"decoder_input_details": decoder_input_details,
"details": details,
"do_sample": do_sample,
"frequency_penalty": frequency_penalty,
"grammar": grammar,
"max_new_tokens": max_new_tokens,
"repetition_penalty": repetition_penalty,
"return_full_text": return_full_text,
"seed": seed,
"stop": stop if stop is not None else [],
"temperature": temperature,
"top_k": top_k,
"top_n_tokens": top_n_tokens,
"top_p": top_p,
"truncate": truncate,
"typical_p": typical_p,
"watermark": watermark,
}
parameters = {k: v for k, v in parameters.items() if v is not None}
payload = {
"inputs": prompt,
"parameters": parameters,
"stream": stream,
}
# Remove some parameters if not a TGI server
unsupported_kwargs = _get_unsupported_text_generation_kwargs(model)
if len(unsupported_kwargs) > 0:
# The server does not support some parameters
# => means it is not a TGI server
# => remove unsupported parameters and warn the user
ignored_parameters = []
for key in unsupported_kwargs:
if parameters.get(key):
ignored_parameters.append(key)
parameters.pop(key, None)
if len(ignored_parameters) > 0:
warnings.warn(
"API endpoint/model for text-generation is not served via TGI. Ignoring following parameters:"
f" {', '.join(ignored_parameters)}.",
UserWarning,
)
if details:
warnings.warn(
"API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
" be ignored meaning only the generated text will be returned.",
UserWarning,
)
details = False
if stream:
raise ValueError(
"API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
" Please pass `stream=False` as input."
)
# Handle errors separately for more precise error messages
try:
bytes_output = await self.post(json=payload, model=model, task="text-generation", stream=stream) # type: ignore
except _import_aiohttp().ClientResponseError as e:
match = MODEL_KWARGS_NOT_USED_REGEX.search(e.response_error_payload["error"])
if e.status == 400 and match:
unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(",")]
_set_unsupported_text_generation_kwargs(model, unused_params)
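# Retry the request once: the parameters flagged as unsupported are now cached for this model and will be stripped before the retry is sent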
return await self.text_generation( # type: ignore
prompt=prompt,
details=details,
stream=stream,
model=model,
adapter_id=adapter_id,
best_of=best_of,
decoder_input_details=decoder_input_details,
do_sample=do_sample,
frequency_penalty=frequency_penalty,
grammar=grammar,
max_new_tokens=max_new_tokens,
repetition_penalty=repetition_penalty,
return_full_text=return_full_text,
seed=seed,
stop=stop,
temperature=temperature,
top_k=top_k,
top_n_tokens=top_n_tokens,
top_p=top_p,
truncate=truncate,
typical_p=typical_p,
watermark=watermark,
)
raise_text_generation_error(e)
# Parse output
if stream:
return _async_stream_text_generation_response(bytes_output, details) # type: ignore
data = _bytes_to_dict(bytes_output) # type: ignore[arg-type]
# Data can be a single element (dict) or a list of dicts, in which case we select the first element.
if isinstance(data, list):
data = data[0]
return TextGenerationOutput.parse_obj_as_instance(data) if details else data["generated_text"]
async def text_to_image(
self,
prompt: str,
*,
negative_prompt: Optional[List[str]] = None,
height: Optional[float] = None,
width: Optional[float] = None,
num_inference_steps: Optional[int] = None,
guidance_scale: Optional[float] = None,
model: Optional[str] = None,
scheduler: Optional[str] = None,
target_size: Optional[TextToImageTargetSize] = None,
seed: Optional[int] = None,
**kwargs,
) -> "Image":
"""
Generate an image based on a given text using a specified model.
<Tip warning={true}>
You must have `PIL` installed if you want to work with images (`pip install Pillow`).
</Tip>
Args:
prompt (`str`):
The prompt to generate an image from.
negative_prompt (`List[str]`, *optional*):
One or several prompts to guide what NOT to include in the image generation.
height (`float`, *optional*):
The height in pixels of the image to generate.
width (`float`, *optional*):
The width in pixels of the image to generate.
num_inference_steps (`int`, *optional*):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*):
A higher guidance scale value encourages the model to generate images closely linked to the text
prompt, but values too high may cause saturation and other artifacts.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended text-to-image model will be used.
Defaults to None.
scheduler (`str`, *optional*):
Override the scheduler with a compatible one.
target_size (`TextToImageTargetSize`, *optional*):
The size in pixels of the output image.
seed (`int`, *optional*):
Seed for the random number generator.
Returns:
`Image`: The generated image.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> image = await client.text_to_image("An astronaut riding a horse on the moon.")
>>> image.save("astronaut.png")
>>> image = await client.text_to_image(
... "An astronaut riding a horse on the moon.",
... negative_prompt="low resolution, blurry",
... model="stabilityai/stable-diffusion-2-1",
... )
>>> image.save("better_astronaut.png")
```
"""
parameters = {
"negative_prompt": negative_prompt,
"height": height,
"width": width,
"num_inference_steps": num_inference_steps,
"guidance_scale": guidance_scale,
"scheduler": scheduler,
"target_size": target_size,
"seed": seed,
**kwargs,
}
payload = _prepare_payload(prompt, parameters=parameters)
response = await self.post(**payload, model=model, task="text-to-image")
return _bytes_to_image(response)
async def text_to_speech(
self,
text: str,
*,
model: Optional[str] = None,
do_sample: Optional[bool] = None,
early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None,
epsilon_cutoff: Optional[float] = None,
eta_cutoff: Optional[float] = None,
max_length: Optional[int] = None,
max_new_tokens: Optional[int] = None,
min_length: Optional[int] = None,
min_new_tokens: Optional[int] = None,
num_beam_groups: Optional[int] = None,
num_beams: Optional[int] = None,
penalty_alpha: Optional[float] = None,
temperature: Optional[float] = None,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
typical_p: Optional[float] = None,
use_cache: Optional[bool] = None,
) -> bytes:
"""
Synthesize an audio of a voice pronouncing a given text.
Args:
text (`str`):
The text to synthesize.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. If not provided, the default recommended text-to-speech model will be used.
Defaults to None.
do_sample (`bool`, *optional*):
Whether to use sampling instead of greedy decoding when generating new tokens.
early_stopping (`Union[bool, "TextToSpeechEarlyStoppingEnum"]`, *optional*):
Controls the stopping condition for beam-based methods.
epsilon_cutoff (`float`, *optional*):
If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on
the size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://hf.co/papers/2210.15191) for more details.
eta_cutoff (`float`, *optional*):
Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly
between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff)
* exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token
probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3,
depending on the size of the model. See [Truncation Sampling as Language Model
Desmoothing](https://hf.co/papers/2210.15191) for more details.
max_length (`int`, *optional*):
The maximum length (in tokens) of the generated text, including the input.
max_new_tokens (`int`, *optional*):
The maximum number of tokens to generate. Takes precedence over max_length.
min_length (`int`, *optional*):
The minimum length (in tokens) of the generated text, including the input.
min_new_tokens (`int`, *optional*):
The minimum number of tokens to generate. Takes precedence over min_length.
num_beam_groups (`int`, *optional*):
Number of groups to divide num_beams into in order to ensure diversity among different groups of beams.
See [this paper](https://hf.co/papers/1610.02424) for more details.
num_beams (`int`, *optional*):
Number of beams to use for beam search.
penalty_alpha (`float`, *optional*):
The value balances the model confidence and the degeneration penalty in contrastive search decoding.
temperature (`float`, *optional*):
The value used to modulate the next token probabilities.
top_k (`int`, *optional*):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (`float`, *optional*):
If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
top_p or higher are kept for generation.
typical_p (`float`, *optional*):
Local typicality measures how similar the conditional probability of predicting a target token next is
to the expected conditional probability of predicting a random token next, given the partial text
already generated. If set to float < 1, the smallest set of the most locally typical tokens with
probabilities that add up to typical_p or higher are kept for generation. See [this
paper](https://hf.co/papers/2202.00666) for more details.
use_cache (`bool`, *optional*):
Whether the model should use the past last key/values attentions to speed up decoding
Returns:
`bytes`: The generated audio.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from pathlib import Path
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> audio = await client.text_to_speech("Hello world")
>>> Path("hello_world.flac").write_bytes(audio)
```
"""
parameters = {
"do_sample": do_sample,
"early_stopping": early_stopping,
"epsilon_cutoff": epsilon_cutoff,
"eta_cutoff": eta_cutoff,
"max_length": max_length,
"max_new_tokens": max_new_tokens,
"min_length": min_length,
"min_new_tokens": min_new_tokens,
"num_beam_groups": num_beam_groups,
"num_beams": num_beams,
"penalty_alpha": penalty_alpha,
"temperature": temperature,
"top_k": top_k,
"top_p": top_p,
"typical_p": typical_p,
"use_cache": use_cache,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(**payload, model=model, task="text-to-speech")
return response
async def token_classification(
self,
text: str,
*,
model: Optional[str] = None,
aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None,
ignore_labels: Optional[List[str]] = None,
stride: Optional[int] = None,
) -> List[TokenClassificationOutputElement]:
"""
Perform token classification on the given text.
        Usually used for sentence parsing, either grammatical or Named Entity Recognition (NER), to understand keywords contained within the text.
Args:
text (`str`):
A string to be classified.
model (`str`, *optional*):
The model to use for the token classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended token classification model will be used.
Defaults to None.
aggregation_strategy (`"TokenClassificationAggregationStrategy"`, *optional*):
                The strategy used to fuse tokens based on model predictions.
            ignore_labels (`List[str]`, *optional*):
                A list of labels to ignore.
stride (`int`, *optional*):
The number of overlapping tokens between chunks when splitting the input text.
Returns:
`List[TokenClassificationOutputElement]`: List of [`TokenClassificationOutputElement`] items containing the entity group, confidence score, word, start and end index.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.token_classification("My name is Sarah Jessica Parker but you can call me Jessica")
[
TokenClassificationOutputElement(
entity_group='PER',
score=0.9971321225166321,
word='Sarah Jessica Parker',
start=11,
end=31,
),
TokenClassificationOutputElement(
entity_group='PER',
score=0.9773476123809814,
word='Jessica',
start=52,
end=59,
)
]
```
"""
parameters = {
"aggregation_strategy": aggregation_strategy,
"ignore_labels": ignore_labels,
"stride": stride,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(
**payload,
model=model,
task="token-classification",
)
return TokenClassificationOutputElement.parse_obj_as_list(response)
async def translation(
self,
text: str,
*,
model: Optional[str] = None,
src_lang: Optional[str] = None,
tgt_lang: Optional[str] = None,
clean_up_tokenization_spaces: Optional[bool] = None,
truncation: Optional["TranslationTruncationStrategy"] = None,
generate_parameters: Optional[Dict[str, Any]] = None,
) -> TranslationOutput:
"""
Convert text from one language to another.
Check out https://huggingface.co/tasks/translation for more information on how to choose the best model for
your specific use case. Source and target languages usually depend on the model.
However, it is possible to specify source and target languages for certain models. If you are working with one of these models,
you can use `src_lang` and `tgt_lang` arguments to pass the relevant information.
Args:
text (`str`):
A string to be translated.
model (`str`, *optional*):
The model to use for the translation task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended translation model will be used.
Defaults to None.
src_lang (`str`, *optional*):
The source language of the text. Required for models that can translate from multiple languages.
tgt_lang (`str`, *optional*):
Target language to translate to. Required for models that can translate to multiple languages.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether to clean up the potential extra spaces in the text output.
truncation (`"TranslationTruncationStrategy"`, *optional*):
The truncation strategy to use.
generate_parameters (`Dict[str, Any]`, *optional*):
Additional parametrization of the text generation algorithm.
Returns:
[`TranslationOutput`]: The generated translated text.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
`ValueError`:
                If only one of the `src_lang` and `tgt_lang` arguments is provided.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.translation("My name is Wolfgang and I live in Berlin")
'Mein Name ist Wolfgang und ich lebe in Berlin.'
>>> await client.translation("My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr")
        TranslationOutput(translation_text="Je m'appelle Wolfgang et je vis à Berlin.")
```
Specifying languages:
```py
        >>> await client.translation("My name is Sarah Jessica Parker but you can call me Jessica", model="facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="fr_XX")
"Mon nom est Sarah Jessica Parker mais vous pouvez m'appeler Jessica"
```
"""
# Throw error if only one of `src_lang` and `tgt_lang` was given
if src_lang is not None and tgt_lang is None:
raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.")
if src_lang is None and tgt_lang is not None:
raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.")
parameters = {
"src_lang": src_lang,
"tgt_lang": tgt_lang,
"clean_up_tokenization_spaces": clean_up_tokenization_spaces,
"truncation": truncation,
"generate_parameters": generate_parameters,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(**payload, model=model, task="translation")
return TranslationOutput.parse_obj_as_list(response)[0]
async def visual_question_answering(
self,
image: ContentT,
question: str,
*,
model: Optional[str] = None,
top_k: Optional[int] = None,
) -> List[VisualQuestionAnsweringOutputElement]:
"""
Answering open-ended questions based on an image.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
question (`str`):
Question to be answered.
model (`str`, *optional*):
The model to use for the visual question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
a deployed Inference Endpoint. If not provided, the default recommended visual question answering model will be used.
Defaults to None.
top_k (`int`, *optional*):
                The number of answers to return (will be chosen by order of likelihood). Note that fewer than
                top_k answers are returned if there are not enough options available within the context.
Returns:
`List[VisualQuestionAnsweringOutputElement]`: a list of [`VisualQuestionAnsweringOutputElement`] items containing the predicted label and associated probability.
Raises:
`InferenceTimeoutError`:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.visual_question_answering(
... image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
... question="What is the animal doing?"
... )
[
VisualQuestionAnsweringOutputElement(score=0.778609573841095, answer='laying down'),
VisualQuestionAnsweringOutputElement(score=0.6957435607910156, answer='sitting'),
]
```
"""
payload: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
if top_k is not None:
payload.setdefault("parameters", {})["top_k"] = top_k
response = await self.post(json=payload, model=model, task="visual-question-answering")
return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response)
@_deprecate_arguments(
version="0.30.0",
deprecated_args=["labels"],
        custom_message="`labels` has been renamed to `candidate_labels` and will be removed in huggingface_hub>=0.30.0.",
)
async def zero_shot_classification(
self,
text: str,
# temporarily keeping it optional for backward compatibility.
candidate_labels: List[str] = None, # type: ignore
*,
multi_label: Optional[bool] = False,
hypothesis_template: Optional[str] = None,
model: Optional[str] = None,
# deprecated argument
labels: List[str] = None, # type: ignore
) -> List[ZeroShotClassificationOutputElement]:
"""
Provide as input a text and a set of candidate labels to classify the input text.
Args:
text (`str`):
The input text to classify.
candidate_labels (`List[str]`):
The set of possible class labels to classify the text into.
labels (`List[str]`, *optional*):
(deprecated) List of strings. Each string is the verbalization of a possible label for the input text.
multi_label (`bool`, *optional*):
Whether multiple candidate labels can be true. If false, the scores are normalized such that the sum of
the label likelihoods for each sequence is 1. If true, the labels are considered independent and
probabilities are normalized for each candidate.
hypothesis_template (`str`, *optional*):
The sentence used in conjunction with `candidate_labels` to attempt the text classification by
replacing the placeholder with the candidate labels.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot classification model will be used.
Returns:
`List[ZeroShotClassificationOutputElement]`: List of [`ZeroShotClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example with `multi_label=False`:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> text = (
... "A new model offers an explanation for how the Galilean satellites formed around the solar system's"
... "largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling"
... " mysteries when he went for a run up a hill in Nice, France."
... )
>>> labels = ["space & cosmos", "scientific discovery", "microbiology", "robots", "archeology"]
>>> await client.zero_shot_classification(text, labels)
[
ZeroShotClassificationOutputElement(label='scientific discovery', score=0.7961668968200684),
ZeroShotClassificationOutputElement(label='space & cosmos', score=0.18570658564567566),
ZeroShotClassificationOutputElement(label='microbiology', score=0.00730885099619627),
ZeroShotClassificationOutputElement(label='archeology', score=0.006258360575884581),
ZeroShotClassificationOutputElement(label='robots', score=0.004559356719255447),
]
>>> await client.zero_shot_classification(text, labels, multi_label=True)
[
ZeroShotClassificationOutputElement(label='scientific discovery', score=0.9829297661781311),
ZeroShotClassificationOutputElement(label='space & cosmos', score=0.755190908908844),
ZeroShotClassificationOutputElement(label='microbiology', score=0.0005462635890580714),
ZeroShotClassificationOutputElement(label='archeology', score=0.00047131875180639327),
ZeroShotClassificationOutputElement(label='robots', score=0.00030448526376858354),
]
```
Example with `multi_label=True` and a custom `hypothesis_template`:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.zero_shot_classification(
... text="I really like our dinner and I'm very happy. I don't like the weather though.",
... labels=["positive", "negative", "pessimistic", "optimistic"],
... multi_label=True,
... hypothesis_template="This text is {} towards the weather"
... )
[
ZeroShotClassificationOutputElement(label='negative', score=0.9231801629066467),
ZeroShotClassificationOutputElement(label='pessimistic', score=0.8760990500450134),
ZeroShotClassificationOutputElement(label='optimistic', score=0.0008674879791215062),
ZeroShotClassificationOutputElement(label='positive', score=0.0005250611575320363)
]
```
"""
# handle deprecation
if labels is not None:
if candidate_labels is not None:
raise ValueError(
"Cannot specify both `labels` and `candidate_labels`. Use `candidate_labels` instead."
)
candidate_labels = labels
elif candidate_labels is None:
raise ValueError("Must specify `candidate_labels`")
parameters = {
"candidate_labels": candidate_labels,
"multi_label": multi_label,
"hypothesis_template": hypothesis_template,
}
payload = _prepare_payload(text, parameters=parameters)
response = await self.post(
**payload,
task="zero-shot-classification",
model=model,
)
output = _bytes_to_dict(response)
return [
ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score})
for label, score in zip(output["labels"], output["scores"])
]
@_deprecate_arguments(
version="0.30.0",
deprecated_args=["labels"],
        custom_message="`labels` has been renamed to `candidate_labels` and will be removed in huggingface_hub>=0.30.0.",
)
async def zero_shot_image_classification(
self,
image: ContentT,
# temporarily keeping it optional for backward compatibility.
candidate_labels: List[str] = None, # type: ignore
*,
model: Optional[str] = None,
hypothesis_template: Optional[str] = None,
# deprecated argument
labels: List[str] = None, # type: ignore
) -> List[ZeroShotImageClassificationOutputElement]:
"""
        Provide an input image and text labels to predict text labels for the image.
Args:
image (`Union[str, Path, bytes, BinaryIO]`):
                The input image to classify. It can be raw bytes, an image file, or a URL to an online image.
candidate_labels (`List[str]`):
The candidate labels for this image
labels (`List[str]`, *optional*):
(deprecated) List of string possible labels. There must be at least 2 labels.
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot image classification model will be used.
hypothesis_template (`str`, *optional*):
The sentence used in conjunction with `candidate_labels` to attempt the image classification by
replacing the placeholder with the candidate labels.
Returns:
`List[ZeroShotImageClassificationOutputElement]`: List of [`ZeroShotImageClassificationOutputElement`] items containing the predicted labels and their confidence.
Raises:
[`InferenceTimeoutError`]:
If the model is unavailable or the request times out.
`aiohttp.ClientResponseError`:
If the request fails with an HTTP error status code other than HTTP 503.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.zero_shot_image_classification(
... "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
... labels=["dog", "cat", "horse"],
... )
[ZeroShotImageClassificationOutputElement(label='dog', score=0.956),...]
```
"""
# handle deprecation
if labels is not None:
if candidate_labels is not None:
raise ValueError(
"Cannot specify both `labels` and `candidate_labels`. Use `candidate_labels` instead."
)
candidate_labels = labels
elif candidate_labels is None:
raise ValueError("Must specify `candidate_labels`")
        # Raise ValueError if input has fewer than 2 labels
if len(candidate_labels) < 2:
raise ValueError("You must specify at least 2 classes to compare.")
parameters = {
"candidate_labels": candidate_labels,
"hypothesis_template": hypothesis_template,
}
payload = _prepare_payload(image, parameters=parameters, expect_binary=True)
response = await self.post(
**payload,
model=model,
task="zero-shot-image-classification",
)
return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)
def _get_client_session(self, headers: Optional[Dict] = None) -> "ClientSession":
aiohttp = _import_aiohttp()
client_headers = self.headers.copy()
if headers is not None:
client_headers.update(headers)
# Return a new aiohttp ClientSession with correct settings.
session = aiohttp.ClientSession(
headers=client_headers,
cookies=self.cookies,
timeout=aiohttp.ClientTimeout(self.timeout),
trust_env=self.trust_env,
)
# Keep track of sessions to close them later
self._sessions[session] = set()
# Override the `._request` method to register responses to be closed
session._wrapped_request = session._request
async def _request(method, url, **kwargs):
response = await session._wrapped_request(method, url, **kwargs)
self._sessions[session].add(response)
return response
session._request = _request
# Override the 'close' method to
# 1. close ongoing responses
# 2. deregister the session when closed
session._close = session.close
async def close_session():
for response in self._sessions[session]:
response.close()
await session._close()
self._sessions.pop(session, None)
session.close = close_session
return session
def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
model = model or self.model or self.base_url
# If model is already a URL, ignore `task` and return directly
if model is not None and (model.startswith("http://") or model.startswith("https://")):
return model
        # If no model but task is set => fetch the recommended one for this task
if model is None:
if task is None:
raise ValueError(
"You must specify at least a model (repo_id or URL) or a task, either when instantiating"
" `InferenceClient` or when making a request."
)
model = self.get_recommended_model(task)
logger.info(
f"Using recommended model {model} for task {task}. Note that it is"
f" encouraged to explicitly set `model='{model}'` as the recommended"
" models list might get updated without prior notice."
)
# Compute InferenceAPI url
return (
# Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
if task in ("feature-extraction", "sentence-similarity")
# Otherwise, we use the default endpoint
else f"{INFERENCE_ENDPOINT}/models/{model}"
)
@staticmethod
def get_recommended_model(task: str) -> str:
"""
Get the model Hugging Face recommends for the input task.
Args:
task (`str`):
                The Hugging Face task for which to get the recommended model.
All available tasks can be found [here](https://huggingface.co/tasks).
Returns:
`str`: Name of the model recommended for the input task.
Raises:
`ValueError`: If Hugging Face has no recommendation for the input task.
"""
model = _fetch_recommended_models().get(task)
if model is None:
raise ValueError(
f"Task {task} has no recommended model. Please specify a model"
" explicitly. Visit https://huggingface.co/tasks for more info."
)
return model
async def get_endpoint_info(self, *, model: Optional[str] = None) -> Dict[str, Any]:
"""
Get information about the deployed endpoint.
This endpoint is only available on endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI).
Endpoints powered by `transformers` return an empty payload.
Args:
model (`str`, *optional*):
The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
`Dict[str, Any]`: Information about the endpoint.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
>>> await client.get_endpoint_info()
{
'model_id': 'meta-llama/Meta-Llama-3-70B-Instruct',
'model_sha': None,
'model_dtype': 'torch.float16',
'model_device_type': 'cuda',
'model_pipeline_tag': None,
'max_concurrent_requests': 128,
'max_best_of': 2,
'max_stop_sequences': 4,
'max_input_length': 8191,
'max_total_tokens': 8192,
'waiting_served_ratio': 0.3,
'max_batch_total_tokens': 1259392,
'max_waiting_tokens': 20,
'max_batch_size': None,
'validation_workers': 32,
'max_client_batch_size': 4,
'version': '2.0.2',
'sha': 'dccab72549635c7eb5ddb17f43f0b7cdff07c214',
'docker_label': 'sha-dccab72'
}
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if model.startswith(("http://", "https://")):
url = model.rstrip("/") + "/info"
else:
url = f"{INFERENCE_ENDPOINT}/models/{model}/info"
async with self._get_client_session() as client:
response = await client.get(url, proxy=self.proxies)
response.raise_for_status()
return await response.json()
async def health_check(self, model: Optional[str] = None) -> bool:
"""
Check the health of the deployed endpoint.
Health check is only available with Inference Endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI).
For Inference API, please use [`InferenceClient.get_model_status`] instead.
Args:
model (`str`, *optional*):
URL of the Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
Returns:
`bool`: True if everything is working fine.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient("https://jzgu0buei5.us-east-1.aws.endpoints.huggingface.cloud")
>>> await client.health_check()
True
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if not model.startswith(("http://", "https://")):
raise ValueError(
"Model must be an Inference Endpoint URL. For serverless Inference API, please use `InferenceClient.get_model_status`."
)
url = model.rstrip("/") + "/health"
async with self._get_client_session() as client:
response = await client.get(url, proxy=self.proxies)
return response.status == 200
async def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
"""
Get the status of a model hosted on the Inference API.
<Tip>
This endpoint is mostly useful when you already know which model you want to use and want to check its
availability. If you want to discover already deployed models, you should rather use [`~InferenceClient.list_deployed_models`].
</Tip>
Args:
model (`str`, *optional*):
                Identifier of the model for which the status will be checked. If not provided,
                the model associated with this instance of [`InferenceClient`] will be used. Only the InferenceAPI service can be checked, so the
                identifier cannot be a URL.
Returns:
            [`ModelStatus`]: An instance of the ModelStatus dataclass, containing information
            about the state of the model: load, state, compute type and framework.
Example:
```py
# Must be run in an async context
>>> from huggingface_hub import AsyncInferenceClient
>>> client = AsyncInferenceClient()
>>> await client.get_model_status("meta-llama/Meta-Llama-3-8B-Instruct")
ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference')
```
"""
model = model or self.model
if model is None:
raise ValueError("Model id not provided.")
if model.startswith("https://"):
raise NotImplementedError("Model status is only available for Inference API endpoints.")
url = f"{INFERENCE_ENDPOINT}/status/{model}"
async with self._get_client_session() as client:
response = await client.get(url, proxy=self.proxies)
response.raise_for_status()
response_data = await response.json()
if "error" in response_data:
raise ValueError(response_data["error"])
return ModelStatus(
loaded=response_data["loaded"],
state=response_data["state"],
compute_type=response_data["compute_type"],
framework=response_data["framework"],
)
@property
def chat(self) -> "ProxyClientChat":
return ProxyClientChat(self) | class_definition | 3,728 | 154,949 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/_async_client.py | null | 143 |
class _ProxyClient:
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
def __init__(self, client: AsyncInferenceClient):
self._client = client | class_definition | 154,952 | 155,152 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/_async_client.py | null | 144 |
class ProxyClientChat(_ProxyClient):
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
@property
def completions(self) -> "ProxyClientChatCompletions":
return ProxyClientChatCompletions(self._client) | class_definition | 155,155 | 155,417 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/_async_client.py | null | 145 |
class ProxyClientChatCompletions(_ProxyClient):
"""Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client."""
@property
def create(self):
return self._client.chat_completion | class_definition | 155,420 | 155,644 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/_async_client.py | null | 146 |
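Together, these three proxy classes are what make the OpenAI-style call syntax `client.chat.completions.create(...)` work on `AsyncInferenceClient`: the `create` property simply resolves to `AsyncInferenceClient.chat_completion`. A minimal sketch (the model id is only an example):

```py
# Must be run in an async context
from huggingface_hub import AsyncInferenceClient

client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")  # example model id
# `client.chat.completions.create` forwards to `client.chat_completion`
output = await client.chat.completions.create(
    messages=[{"role": "user", "content": "What is deep learning?"}],
    max_tokens=100,
)
print(output.choices[0].message.content)
```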
class TextToAudioGenerationParameters(BaseInferenceType):
"""Parametrization of the text generation process"""
do_sample: Optional[bool] = None
"""Whether to use sampling instead of greedy decoding when generating new tokens."""
early_stopping: Optional[Union[bool, "TextToAudioEarlyStoppingEnum"]] = None
"""Controls the stopping condition for beam-based methods."""
epsilon_cutoff: Optional[float] = None
"""If set to float strictly between 0 and 1, only tokens with a conditional probability
greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
"""
eta_cutoff: Optional[float] = None
"""Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
float strictly between 0 and 1, a token is only considered if it is greater than either
eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
for more details.
"""
max_length: Optional[int] = None
"""The maximum length (in tokens) of the generated text, including the input."""
max_new_tokens: Optional[int] = None
"""The maximum number of tokens to generate. Takes precedence over max_length."""
min_length: Optional[int] = None
"""The minimum length (in tokens) of the generated text, including the input."""
min_new_tokens: Optional[int] = None
"""The minimum number of tokens to generate. Takes precedence over min_length."""
num_beam_groups: Optional[int] = None
"""Number of groups to divide num_beams into in order to ensure diversity among different
groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
"""
num_beams: Optional[int] = None
"""Number of beams to use for beam search."""
penalty_alpha: Optional[float] = None
"""The value balances the model confidence and the degeneration penalty in contrastive
search decoding.
"""
temperature: Optional[float] = None
"""The value used to modulate the next token probabilities."""
top_k: Optional[int] = None
"""The number of highest probability vocabulary tokens to keep for top-k-filtering."""
top_p: Optional[float] = None
"""If set to float < 1, only the smallest set of most probable tokens with probabilities
that add up to top_p or higher are kept for generation.
"""
typical_p: Optional[float] = None
"""Local typicality measures how similar the conditional probability of predicting a target
token next is to the expected conditional probability of predicting a random token next,
given the partial text already generated. If set to float < 1, the smallest set of the
most locally typical tokens with probabilities that add up to typical_p or higher are
kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
"""
use_cache: Optional[bool] = None
"""Whether the model should use the past last key/values attentions to speed up decoding""" | class_definition | 478 | 3,933 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_audio.py | null | 147 |
class TextToAudioParameters(BaseInferenceType):
"""Additional inference parameters for Text To Audio"""
# Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
generate_kwargs: Optional[TextToAudioGenerationParameters] = None
"""Parametrization of the text generation process""" | class_definition | 3,947 | 4,297 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_audio.py | null | 148 |
class TextToAudioInput(BaseInferenceType):
"""Inputs for Text To Audio inference"""
inputs: str
"""The input text data"""
parameters: Optional[TextToAudioParameters] = None
"""Additional inference parameters for Text To Audio""" | class_definition | 4,311 | 4,560 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_audio.py | null | 149 |
class TextToAudioOutput(BaseInferenceType):
"""Outputs of inference for the Text To Audio task"""
audio: Any
"""The generated audio waveform."""
sampling_rate: Any
text_to_audio_output_sampling_rate: Optional[float] = None
"""The sampling rate of the generated audio waveform.""" | class_definition | 4,574 | 4,878 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_audio.py | null | 150 |
class QuestionAnsweringInputData(BaseInferenceType):
"""One (context, question) pair to answer"""
context: str
"""The context to be used for answering the question"""
question: str
"""The question to be answered""" | class_definition | 407 | 642 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/question_answering.py | null | 151 |
class QuestionAnsweringParameters(BaseInferenceType):
"""Additional inference parameters for Question Answering"""
align_to_words: Optional[bool] = None
"""Attempts to align the answer to real words. Improves quality on space separated
languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
"""
doc_stride: Optional[int] = None
"""If the context is too long to fit with the question for the model, it will be split in
several chunks with some overlap. This argument controls the size of that overlap.
"""
handle_impossible_answer: Optional[bool] = None
"""Whether to accept impossible as an answer."""
max_answer_len: Optional[int] = None
"""The maximum length of predicted answers (e.g., only answers with a shorter length are
considered).
"""
max_question_len: Optional[int] = None
"""The maximum length of the question after tokenization. It will be truncated if needed."""
max_seq_len: Optional[int] = None
"""The maximum length of the total sentence (context + question) in tokens of each chunk
passed to the model. The context will be split in several chunks (using docStride as
overlap) if needed.
"""
top_k: Optional[int] = None
"""The number of answers to return (will be chosen by order of likelihood). Note that we
return less than topk answers if there are not enough options available within the
context.
""" | class_definition | 656 | 2,113 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/question_answering.py | null | 152 |
class QuestionAnsweringInput(BaseInferenceType):
"""Inputs for Question Answering inference"""
inputs: QuestionAnsweringInputData
"""One (context, question) pair to answer"""
parameters: Optional[QuestionAnsweringParameters] = None
"""Additional inference parameters for Question Answering""" | class_definition | 2,127 | 2,440 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/question_answering.py | null | 153 |
class QuestionAnsweringOutputElement(BaseInferenceType):
"""Outputs of inference for the Question Answering task"""
answer: str
"""The answer to the question."""
end: int
"""The character position in the input where the answer ends."""
score: float
"""The probability associated to the answer."""
start: int
"""The character position in the input where the answer begins.""" | class_definition | 2,454 | 2,865 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/question_answering.py | null | 154 |
class Text2TextGenerationParameters(BaseInferenceType):
"""Additional inference parameters for Text2text Generation"""
clean_up_tokenization_spaces: Optional[bool] = None
"""Whether to clean up the potential extra spaces in the text output."""
generate_parameters: Optional[Dict[str, Any]] = None
"""Additional parametrization of the text generation algorithm"""
truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
"""The truncation strategy to use""" | class_definition | 542 | 1,039 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text2text_generation.py | null | 155 |
class Text2TextGenerationInput(BaseInferenceType):
"""Inputs for Text2text Generation inference"""
inputs: str
"""The input text data"""
parameters: Optional[Text2TextGenerationParameters] = None
"""Additional inference parameters for Text2text Generation""" | class_definition | 1,053 | 1,332 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text2text_generation.py | null | 156 |
class Text2TextGenerationOutput(BaseInferenceType):
"""Outputs of inference for the Text2text Generation task"""
generated_text: Any
text2_text_generation_output_generated_text: Optional[str] = None
"""The generated text.""" | class_definition | 1,346 | 1,587 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text2text_generation.py | null | 157 |
class ImageSegmentationParameters(BaseInferenceType):
"""Additional inference parameters for Image Segmentation"""
mask_threshold: Optional[float] = None
"""Threshold to use when turning the predicted masks into binary values."""
overlap_mask_area_threshold: Optional[float] = None
"""Mask overlap threshold to eliminate small, disconnected segments."""
subtask: Optional["ImageSegmentationSubtask"] = None
"""Segmentation task to be performed, depending on model capabilities."""
threshold: Optional[float] = None
"""Probability threshold to filter out predicted masks.""" | class_definition | 489 | 1,099 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_segmentation.py | null | 158 |
class ImageSegmentationInput(BaseInferenceType):
"""Inputs for Image Segmentation inference"""
inputs: str
"""The input image data as a base64-encoded string. If no `parameters` are provided, you can
also provide the image data as a raw bytes payload.
"""
parameters: Optional[ImageSegmentationParameters] = None
"""Additional inference parameters for Image Segmentation""" | class_definition | 1,113 | 1,515 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_segmentation.py | null | 159 |
class ImageSegmentationOutputElement(BaseInferenceType):
"""Outputs of inference for the Image Segmentation task
A predicted mask / segment
"""
label: str
"""The label of the predicted segment."""
mask: str
"""The corresponding mask as a black-and-white image (base64-encoded)."""
score: Optional[float] = None
"""The score or confidence degree the model has.""" | class_definition | 1,529 | 1,928 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_segmentation.py | null | 160 |
class TextToImageTargetSize(BaseInferenceType):
"""The size in pixel of the output image"""
height: int
width: int | class_definition | 418 | 545 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_image.py | null | 161 |
class TextToImageParameters(BaseInferenceType):
"""Additional inference parameters for Text To Image"""
guidance_scale: Optional[float] = None
"""A higher guidance scale value encourages the model to generate images closely linked to
the text prompt, but values too high may cause saturation and other artifacts.
"""
negative_prompt: Optional[List[str]] = None
"""One or several prompt to guide what NOT to include in image generation."""
num_inference_steps: Optional[int] = None
"""The number of denoising steps. More denoising steps usually lead to a higher quality
image at the expense of slower inference.
"""
scheduler: Optional[str] = None
"""Override the scheduler with a compatible one."""
seed: Optional[int] = None
"""Seed for the random number generator."""
target_size: Optional[TextToImageTargetSize] = None
"""The size in pixel of the output image""" | class_definition | 559 | 1,493 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_image.py | null | 162 |
class TextToImageInput(BaseInferenceType):
"""Inputs for Text To Image inference"""
inputs: str
"""The input text data (sometimes called "prompt")"""
parameters: Optional[TextToImageParameters] = None
"""Additional inference parameters for Text To Image""" | class_definition | 1,507 | 1,784 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_image.py | null | 163 |
class TextToImageOutput(BaseInferenceType):
"""Outputs of inference for the Text To Image task"""
image: Any
"""The generated image returned as raw bytes in the payload.""" | class_definition | 1,798 | 1,983 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/text_to_image.py | null | 164 |
class VisualQuestionAnsweringInputData(BaseInferenceType):
"""One (image, question) pair to answer"""
image: Any
"""The image."""
question: Any
"""The question to answer based on the image.""" | class_definition | 412 | 625 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/visual_question_answering.py | null | 165 |
class VisualQuestionAnsweringParameters(BaseInferenceType):
"""Additional inference parameters for Visual Question Answering"""
top_k: Optional[int] = None
"""The number of answers to return (will be chosen by order of likelihood). Note that we
return less than topk answers if there are not enough options available within the
context.
""" | class_definition | 639 | 1,004 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/visual_question_answering.py | null | 166 |
class VisualQuestionAnsweringInput(BaseInferenceType):
"""Inputs for Visual Question Answering inference"""
inputs: VisualQuestionAnsweringInputData
"""One (image, question) pair to answer"""
parameters: Optional[VisualQuestionAnsweringParameters] = None
"""Additional inference parameters for Visual Question Answering""" | class_definition | 1,018 | 1,361 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/visual_question_answering.py | null | 167 |
class VisualQuestionAnsweringOutputElement(BaseInferenceType):
"""Outputs of inference for the Visual Question Answering task"""
score: float
"""The associated score / probability"""
answer: Optional[str] = None
"""The answer to the question""" | class_definition | 1,375 | 1,640 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/visual_question_answering.py | null | 168 |
class ChatCompletionInputURL(BaseInferenceType):
url: str | class_definition | 434 | 495 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 169 |
class ChatCompletionInputMessageChunk(BaseInferenceType):
type: "ChatCompletionInputMessageChunkType"
image_url: Optional[ChatCompletionInputURL] = None
text: Optional[str] = None | class_definition | 578 | 769 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 170 |
class ChatCompletionInputMessage(BaseInferenceType):
content: Union[List[ChatCompletionInputMessageChunk], str]
role: str
name: Optional[str] = None | class_definition | 783 | 943 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 171 |
class ChatCompletionInputGrammarType(BaseInferenceType):
type: "ChatCompletionInputGrammarTypeType"
value: Any
"""A string that represents a [JSON Schema](https://json-schema.org/).
    JSON Schema is a declarative language that allows annotating JSON documents
with types and descriptions.
""" | class_definition | 1,021 | 1,336 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 172 |
class ChatCompletionInputStreamOptions(BaseInferenceType):
include_usage: bool
"""If set, an additional chunk will be streamed before the data: [DONE] message. The usage
field on this chunk shows the token usage statistics for the entire request, and the
choices field will always be an empty array. All other chunks will also include a usage
field, but with a null value.
""" | class_definition | 1,350 | 1,750 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 173 |
class ChatCompletionInputFunctionName(BaseInferenceType):
name: str | class_definition | 1,764 | 1,835 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 174 |
class ChatCompletionInputToolChoiceClass(BaseInferenceType):
function: ChatCompletionInputFunctionName | class_definition | 1,849 | 1,955 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 175 |
class ChatCompletionInputFunctionDefinition(BaseInferenceType):
arguments: Any
name: str
description: Optional[str] = None | class_definition | 2,043 | 2,177 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 176 |
class ChatCompletionInputTool(BaseInferenceType):
function: ChatCompletionInputFunctionDefinition
type: str | class_definition | 2,191 | 2,306 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 177 |
class ChatCompletionInput(BaseInferenceType):
"""Chat Completion Input.
Auto-generated from TGI specs.
For more details, check out
https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
"""
messages: List[ChatCompletionInputMessage]
"""A list of messages comprising the conversation so far."""
frequency_penalty: Optional[float] = None
"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
frequency in the text so far,
decreasing the model's likelihood to repeat the same line verbatim.
"""
logit_bias: Optional[List[float]] = None
"""UNUSED
Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
object that maps tokens
(specified by their token ID in the tokenizer) to an associated bias value from -100 to
100. Mathematically,
the bias is added to the logits generated by the model prior to sampling. The exact
effect will vary per model,
but values between -1 and 1 should decrease or increase likelihood of selection; values
like -100 or 100 should
result in a ban or exclusive selection of the relevant token.
"""
logprobs: Optional[bool] = None
"""Whether to return log probabilities of the output tokens or not. If true, returns the log
probabilities of each
output token returned in the content of message.
"""
max_tokens: Optional[int] = None
"""The maximum number of tokens that can be generated in the chat completion."""
model: Optional[str] = None
"""[UNUSED] ID of the model to use. See the model endpoint compatibility table for details
on which models work with the Chat API.
"""
n: Optional[int] = None
"""UNUSED
How many chat completion choices to generate for each input message. Note that you will
be charged based on the
number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
"""
presence_penalty: Optional[float] = None
"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
appear in the text so far,
increasing the model's likelihood to talk about new topics
"""
response_format: Optional[ChatCompletionInputGrammarType] = None
seed: Optional[int] = None
stop: Optional[List[str]] = None
"""Up to 4 sequences where the API will stop generating further tokens."""
stream: Optional[bool] = None
stream_options: Optional[ChatCompletionInputStreamOptions] = None
temperature: Optional[float] = None
"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the
output more random, while
lower values like 0.2 will make it more focused and deterministic.
We generally recommend altering this or `top_p` but not both.
"""
tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
tool_prompt: Optional[str] = None
"""A prompt to be appended before the tools"""
tools: Optional[List[ChatCompletionInputTool]] = None
"""A list of tools the model may call. Currently, only functions are supported as a tool.
Use this to provide a list of
functions the model may generate JSON inputs for.
"""
top_logprobs: Optional[int] = None
"""An integer between 0 and 5 specifying the number of most likely tokens to return at each
token position, each with
an associated log probability. logprobs must be set to true if this parameter is used.
"""
top_p: Optional[float] = None
"""An alternative to sampling with temperature, called nucleus sampling, where the model
considers the results of the
tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
probability mass are considered.
""" | class_definition | 2,320 | 6,217 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 178 |
class ChatCompletionOutputTopLogprob(BaseInferenceType):
logprob: float
token: str | class_definition | 6,231 | 6,321 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 179 |
class ChatCompletionOutputLogprob(BaseInferenceType):
logprob: float
token: str
top_logprobs: List[ChatCompletionOutputTopLogprob] | class_definition | 6,335 | 6,477 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 180 |
class ChatCompletionOutputLogprobs(BaseInferenceType):
content: List[ChatCompletionOutputLogprob] | class_definition | 6,491 | 6,592 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 181 |
class ChatCompletionOutputFunctionDefinition(BaseInferenceType):
arguments: Any
name: str
description: Optional[str] = None | class_definition | 6,606 | 6,741 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 182 |
class ChatCompletionOutputToolCall(BaseInferenceType):
function: ChatCompletionOutputFunctionDefinition
id: str
type: str | class_definition | 6,755 | 6,888 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 183 |
class ChatCompletionOutputMessage(BaseInferenceType):
role: str
content: Optional[str] = None
tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None | class_definition | 6,902 | 7,071 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 184 |
class ChatCompletionOutputComplete(BaseInferenceType):
finish_reason: str
index: int
message: ChatCompletionOutputMessage
logprobs: Optional[ChatCompletionOutputLogprobs] = None | class_definition | 7,085 | 7,278 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 185 |
class ChatCompletionOutputUsage(BaseInferenceType):
completion_tokens: int
prompt_tokens: int
total_tokens: int | class_definition | 7,292 | 7,415 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 186 |
class ChatCompletionOutput(BaseInferenceType):
"""Chat Completion Output.
Auto-generated from TGI specs.
For more details, check out
https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
"""
choices: List[ChatCompletionOutputComplete]
created: int
id: str
model: str
system_fingerprint: str
usage: ChatCompletionOutputUsage | class_definition | 7,429 | 7,847 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 187 |
class ChatCompletionStreamOutputFunction(BaseInferenceType):
arguments: str
name: Optional[str] = None | class_definition | 7,861 | 7,971 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 188 |
class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
function: ChatCompletionStreamOutputFunction
id: str
index: int
type: str | class_definition | 7,985 | 8,140 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 189 |
class ChatCompletionStreamOutputDelta(BaseInferenceType):
role: str
content: Optional[str] = None
tool_calls: Optional[ChatCompletionStreamOutputDeltaToolCall] = None | class_definition | 8,154 | 8,332 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 190 |
class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
logprob: float
token: str | class_definition | 8,346 | 8,442 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 191 |
class ChatCompletionStreamOutputLogprob(BaseInferenceType):
logprob: float
token: str
top_logprobs: List[ChatCompletionStreamOutputTopLogprob] | class_definition | 8,456 | 8,610 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 192 |
class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
content: List[ChatCompletionStreamOutputLogprob] | class_definition | 8,624 | 8,737 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 193 |
class ChatCompletionStreamOutputChoice(BaseInferenceType):
delta: ChatCompletionStreamOutputDelta
index: int
finish_reason: Optional[str] = None
logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None | class_definition | 8,751 | 8,973 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 194 |
class ChatCompletionStreamOutputUsage(BaseInferenceType):
completion_tokens: int
prompt_tokens: int
total_tokens: int | class_definition | 8,987 | 9,116 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 195 |
class ChatCompletionStreamOutput(BaseInferenceType):
"""Chat Completion Stream Output.
Auto-generated from TGI specs.
For more details, check out
https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
"""
choices: List[ChatCompletionStreamOutputChoice]
created: int
id: str
model: str
system_fingerprint: str
usage: Optional[ChatCompletionStreamOutputUsage] = None | class_definition | 9,130 | 9,588 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/chat_completion.py | null | 196 |
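A small sketch of how streamed chunks are typically consumed: each `ChatCompletionStreamOutput` carries a `delta` whose `content` pieces concatenate into the full reply. The async iterable of chunks is assumed to come from a streaming chat-completion call:

```py
from typing import AsyncIterable

from huggingface_hub.inference._generated.types.chat_completion import ChatCompletionStreamOutput

async def collect_text(chunks: AsyncIterable[ChatCompletionStreamOutput]) -> str:
    """Concatenate the streamed delta contents into the final message text."""
    text = ""
    async for chunk in chunks:
        delta = chunk.choices[0].delta
        if delta.content:
            text += delta.content
    return text
```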
class ImageClassificationParameters(BaseInferenceType):
"""Additional inference parameters for Image Classification"""
function_to_apply: Optional["ImageClassificationOutputTransform"] = None
"""The function to apply to the model outputs in order to retrieve the scores."""
top_k: Optional[int] = None
"""When specified, limits the output to the top K most probable classes.""" | class_definition | 493 | 891 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_classification.py | null | 197 |
class ImageClassificationInput(BaseInferenceType):
"""Inputs for Image Classification inference"""
inputs: str
"""The input image data as a base64-encoded string. If no `parameters` are provided, you can
also provide the image data as a raw bytes payload.
"""
parameters: Optional[ImageClassificationParameters] = None
"""Additional inference parameters for Image Classification""" | class_definition | 905 | 1,315 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_classification.py | null | 198 |
class ImageClassificationOutputElement(BaseInferenceType):
"""Outputs of inference for the Image Classification task"""
label: str
"""The predicted class label."""
score: float
"""The corresponding probability.""" | class_definition | 1,329 | 1,563 | 0 | /Users/nielsrogge/Documents/python_projecten/huggingface_hub/src/huggingface_hub/inference/_generated/types/image_classification.py | null | 199 |