text
stringlengths 7
1.24M
| id
stringlengths 14
166
| metadata
dict | __index_level_0__
int64 0
519
|
---|---|---|---|
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the implementation of the LoraPlus optimizer.
"""
from __future__ import annotations
from operator import attrgetter
import torch.nn as nn
from torch.optim import Optimizer
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from ..peft_model import PeftModel
from ..tuners.lora.layer import Embedding
def create_loraplus_optimizer(
model: PeftModel, optimizer_cls: type[Optimizer], *, lr: float, loraplus_lr_ratio: float, **kwargs
) -> Optimizer:
"""
Creates a LoraPlus optimizer.
Efficient Low Rank Adaptation of Large Models: https://arxiv.org/abs/2402.12354
Reference: https://github.com/nikhil-ghosh-berkeley/loraplus/
Args:
model (`torch.nn.Module`): The model to be optimized.
optimizer_cls (`torch.optim.Optimizer`): The optimizer class to be used.
lr (`float`): The learning rate to be used for the optimizer.
loraplus_lr_ratio (`float`):
The ratio of learning ηB/ηA where ηA (lr) is passed in as the optimizer learning rate. Should be â¥1. Should
be set in tandem with the optimizer learning rate (lr); should be larger when the task is more difficult
and the model needs to update its features to learn well. In this case, it helps to make the learning rate
slightly smaller (e.g., by a factor of 2) than typical vanilla LoRA learning rates
loraplus_lr_embedding (optional `float`):
If LoRA modules are added to embedding layers your can specify a different learning rate for them. Default
value 1e-6.
kwargs (`dict`): Additional keyword arguments to be passed to the optimizer.
Returns:
`torch.optim.Optimizer`: An instance of the specified optimizer class configured with the model's parameters
organized into groups with custom learning rates.
"""
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
param_groups = {
"groupA": {},
"groupB": {},
"groupB_no_decay": {},
"embedding": {},
}
for name, param in model.named_parameters():
if not param.requires_grad:
continue
module = attrgetter(name)(model)
if isinstance(module, Embedding):
param_groups["embedding"][name] = param
elif "lora_B" in name or param.ndim == 1:
if name in decay_parameters:
param_groups["groupB"][name] = param
else:
param_groups["groupB_no_decay"][name] = param
else:
param_groups["groupA"][name] = param
kwargs["lr"] = lr
loraplus_weight_decay = kwargs.pop("loraplus_weight_decay", 0.0)
loraplus_lr_embedding = kwargs.pop("loraplus_lr_embedding", 1e-6)
optimizer_grouped_parameters = [
{
"params": list(param_groups["groupA"].values()),
"weight_decay": loraplus_weight_decay,
"lr": lr,
},
{
"params": list(param_groups["embedding"].values()),
"weight_decay": loraplus_weight_decay,
"lr": loraplus_lr_embedding,
},
{
"params": list(param_groups["groupB"].values()),
"weight_decay": loraplus_weight_decay,
"lr": lr * loraplus_lr_ratio,
},
{
"params": list(param_groups["groupB_no_decay"].values()),
"weight_decay": 0.0,
"lr": lr * loraplus_lr_ratio,
},
]
optimizer = optimizer_cls(optimizer_grouped_parameters, **kwargs)
eight_bit_names = ["Adam8bit", "AdamW8bit", "PagedAdam8bit", "PagedAdamW8bit"]
if optimizer_cls.__name__ in eight_bit_names:
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
for module in model.modules():
if isinstance(module, nn.Embedding):
manager.register_module_override(module, "weight", {"optim_bits": 32})
return optimizer
| peft/src/peft/optimizers/loraplus.py/0 | {
"file_path": "peft/src/peft/optimizers/loraplus.py",
"repo_id": "peft",
"token_count": 1910
} | 180 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .layer import IA3Layer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, IA3Layer):
# (IA)^3 implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
is_feedforward: bool,
init_ia3_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
# note: no check for self.merged because merging is not supported (yet)
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
if requires_conversion:
x = x.float()
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "ia3." + rep
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, IA3Layer):
# IA3 implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
is_feedforward: bool,
init_ia3_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
# note: no check for self.merged because merging is not supported (yet)
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
if requires_conversion:
x = x.float()
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
result = result.clone()
# adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch.
# This has been duplicated here.
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "ia3." + rep
| peft/src/peft/tuners/ia3/bnb.py/0 | {
"file_path": "peft/src/peft/tuners/ia3/bnb.py",
"repo_id": "peft",
"token_count": 2193
} | 181 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from dataclasses import dataclass, field
from typing import Optional, Union
from peft.tuners.prompt_tuning import PromptTuningConfig
from peft.utils import PeftType
class MultitaskPromptTuningInit(str, enum.Enum):
# initialize prompt with text
TEXT = "TEXT"
# initialize prompt with random matrix
RANDOM = "RANDOM"
# average the prefix and column matrices obtained during source training
AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS"
# pick prefix and column matrices for a particular task obtained during source training
EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK"
# only use the prompt embeddings trained during source training
ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED"
@dataclass
class MultitaskPromptTuningConfig(PromptTuningConfig):
prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field(
default=MultitaskPromptTuningInit.RANDOM,
metadata={
"help": (
"How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, "
"EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED."
),
},
)
prompt_tuning_init_state_dict_path: Optional[str] = field(
default=None,
metadata={
"help": (
"The path of source state dict. This is required when training the downstream target prompt from "
"the pretrained source prompt"
),
},
)
prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"})
num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"})
num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"})
def __post_init__(self):
self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
| peft/src/peft/tuners/multitask_prompt_tuning/config.py/0 | {
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/config.py",
"repo_id": "peft",
"token_count": 883
} | 182 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
# with some refactor
import torch
class PrefixEncoder(torch.nn.Module):
r"""
The `torch.nn` model to encode the prefix.
Args:
config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.
Example:
```py
>>> from peft import PrefixEncoder, PrefixTuningConfig
>>> config = PrefixTuningConfig(
... peft_type="PREFIX_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_hidden_size=768,
... )
>>> prefix_encoder = PrefixEncoder(config)
```
**Attributes**:
- **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
- **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
`prefix_projection` is `True`.
- **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.
Input shape: (`batch_size`, `num_virtual_tokens`)
Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
"""
def __init__(self, config):
super().__init__()
self.prefix_projection = config.prefix_projection
token_dim = config.token_dim
num_layers = config.num_layers
encoder_hidden_size = config.encoder_hidden_size
num_virtual_tokens = config.num_virtual_tokens
if self.prefix_projection and not config.inference_mode:
# Use a two-layer MLP to encode the prefix
self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
self.transform = torch.nn.Sequential(
torch.nn.Linear(token_dim, encoder_hidden_size),
torch.nn.Tanh(),
torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
)
else:
self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)
def forward(self, prefix: torch.Tensor):
if self.prefix_projection:
prefix_tokens = self.embedding(prefix)
past_key_values = self.transform(prefix_tokens)
else:
past_key_values = self.embedding(prefix)
return past_key_values
| peft/src/peft/tuners/prefix_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/prefix_tuning/model.py",
"repo_id": "peft",
"token_count": 1228
} | 183 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import packaging.version
import torch
import transformers
@contextmanager
def gather_params_ctx(param, modifier_rank: int = 0, fwd_module: torch.nn.Module = None):
"""Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing."""
if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"):
from transformers.integrations import is_deepspeed_zero3_enabled
else:
from transformers.deepspeed import is_deepspeed_zero3_enabled
if not is_deepspeed_zero3_enabled():
yield
return
import deepspeed
with deepspeed.zero.GatheredParameters(param, modifier_rank=modifier_rank, fwd_module=fwd_module):
yield
return
def dequantize_module_weight(module: torch.nn.Module) -> torch.nn.Parameter:
"""
Helper function to dequantize a quantized weight.
This function should be extended if more quantization schemes are added to the library.
If the weight is not quantized, it will be returned as is.
"""
if hasattr(module, "W_q"): # For handling HQQ quantized weight
weight = module.dequantize()
return weight
weight = module.weight
if not isinstance(weight, torch.nn.Parameter):
if isinstance(weight, torch.Tensor):
# this is an FSDP-specific edge case
return weight # type: ignore
raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead")
cls_name = weight.__class__.__name__
if cls_name not in ("Params4bit", "Int8Params"):
return weight
quant_state = getattr(module, "state", None)
device = weight.device
is_cpu = device.type == torch.device("cpu").type
weight = dequantize_bnb_weight(weight, state=quant_state) # no-op if not bnb
if is_cpu:
# dequantize_bnb_weight for 8bit moves the device in-place, thus we need to move it back to CPU if necessary
module.weight = module.weight.to(device)
return weight
def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None):
"""Helper function to dequantize 4bit or 8bit bnb weights.
Since dequantization is not supported on CPU, the weight will be temporarily moved to CUDA if necessary.
"""
import bitsandbytes as bnb
# BNB requires CUDA weights
device = weight.device
is_cpu = device.type == torch.device("cpu").type
if is_cpu:
weight = weight.to(torch.device("cuda"))
cls_name = weight.__class__.__name__
if cls_name == "Params4bit":
dequantized = bnb.functional.dequantize_4bit(weight.data, weight.quant_state)
if is_cpu:
dequantized = dequantized.to(device)
return dequantized
if state.SCB is None:
state.SCB = weight.SCB
im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device)
im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im)
im, Sim = bnb.functional.transform(im, "col32")
if state.CxB is None:
state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB)
out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB)
dequantized = bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t()
if is_cpu:
dequantized = dequantized.to(device)
return dequantized
| peft/src/peft/utils/integrations.py/0 | {
"file_path": "peft/src/peft/utils/integrations.py",
"repo_id": "peft",
"token_count": 1424
} | 184 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from parameterized import parameterized
from transformers import AutoModelForSeq2SeqLM, AutoModelForTokenClassification
from peft import LoraConfig, PromptEncoderConfig, TaskType, get_peft_model
from .testing_common import PeftCommonTester, PeftTestConfigManager
PEFT_ENCODER_DECODER_MODELS_TO_TEST = [
"ybelkada/tiny-random-T5ForConditionalGeneration-calibrated",
"hf-internal-testing/tiny-random-BartForConditionalGeneration",
]
FULL_GRID = {"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "task_type": "SEQ_2_SEQ_LM"}
class PeftEncoderDecoderModelTester(unittest.TestCase, PeftCommonTester):
r"""
Test if the PeftModel behaves as expected. This includes:
- test if the model has the expected methods
We use parametrized.expand for debugging purposes to test each model individually.
"""
transformers_class = AutoModelForSeq2SeqLM
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
}
return input_dict
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "SEQ_2_SEQ_LM",
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"task_type": "SEQ_2_SEQ_LM",
},
)
)
def test_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs):
self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs)
# skip non lora models - generate does not work for prefix tuning, prompt tuning
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate(model_id, config_cls, config_kwargs)
# skip non lora models - generate does not work for prefix tuning, prompt tuning
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs):
# positional arguments are not supported for PeftModelForSeq2SeqLM
self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=True)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate_half_prec(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs):
self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_encoder_decoders(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_encoder_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_layer_indexing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_encoder_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "SEQ_2_SEQ_LM",
},
)
)
def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_unload_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"task_type": "SEQ_2_SEQ_LM",
},
)
)
def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"task_type": "SEQ_2_SEQ_LM",
},
)
)
def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_disable_adapter(model_id, config_cls, config_kwargs)
def test_active_adapters_prompt_learning(self):
# see issue https://github.com/huggingface/transformers/pull/30790#issuecomment-2253808249
model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-BartForConditionalGeneration")
# any prompt learning method would work here
config = PromptEncoderConfig(task_type=TaskType.SEQ_2_SEQ_LM, num_virtual_tokens=10)
model = get_peft_model(model, config)
assert model.active_adapters == ["default"]
class PeftEncoderDecoderCustomModelTester(unittest.TestCase):
"""
A custom class to write any custom test related with Enc-Dec models
"""
def test_save_shared_tensors(self):
model_id = "hf-internal-testing/tiny-random-RobertaModel"
peft_config = LoraConfig(
task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
model = AutoModelForTokenClassification.from_pretrained(model_id, num_labels=11)
model = get_peft_model(model, peft_config)
with tempfile.TemporaryDirectory() as tmp_dir:
# This should work fine
model.save_pretrained(tmp_dir, safe_serialization=True)
| peft/tests/test_encoder_decoder_models.py/0 | {
"file_path": "peft/tests/test_encoder_decoder_models.py",
"repo_id": "peft",
"token_count": 5157
} | 185 |
# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test file is for tests specific to VeRA, since VeRA has some specific challenges due to the shared weights.
import os
import pytest
import torch
from safetensors import safe_open
from torch import nn
from peft import PeftModel, VeraConfig, get_peft_model
from peft.utils import infer_device
class MLP(nn.Module):
def __init__(self, bias=True):
super().__init__()
self.relu = nn.ReLU()
self.lin0 = nn.Linear(10, 20, bias=bias)
self.lin1 = nn.Linear(20, 20, bias=bias) # lin1 and lin2 have same shape
self.lin2 = nn.Linear(20, 20, bias=bias)
self.lin3 = nn.Linear(20, 2, bias=bias)
self.sm = nn.LogSoftmax(dim=-1)
def forward(self, X):
X = self.lin0(X)
X = self.relu(X)
X = self.lin1(X)
X = self.relu(X)
X = self.lin2(X)
X = self.relu(X)
X = self.lin3(X)
X = self.sm(X)
return X
class TestVera:
@pytest.fixture
def mlp(self):
torch.manual_seed(0)
model = MLP()
return model
@pytest.fixture
def mlp_same_prng(self, mlp):
torch.manual_seed(0)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config)
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
peft_model.add_adapter("other", config2)
return peft_model
def test_multiple_adapters_same_prng_weights(self, mlp_same_prng):
# we can have multiple adapters with the same prng key, in which case the weights should be shared
assert (
mlp_same_prng.base_model.model.lin1.vera_A["default"]
is mlp_same_prng.base_model.model.lin1.vera_A["other"]
)
assert (
mlp_same_prng.base_model.model.lin1.vera_B["default"]
is mlp_same_prng.base_model.model.lin1.vera_B["other"]
)
assert (
mlp_same_prng.base_model.model.lin2.vera_A["default"]
is mlp_same_prng.base_model.model.lin2.vera_A["other"]
)
assert (
mlp_same_prng.base_model.model.lin2.vera_B["default"]
is mlp_same_prng.base_model.model.lin2.vera_B["other"]
)
input = torch.randn(5, 10)
mlp_same_prng.set_adapter("default")
output_default = mlp_same_prng(input)
mlp_same_prng.set_adapter("other")
output_other = mlp_same_prng(input)
assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_different_prng_raises(self):
# we cannot have multiple adapters with different prng keys
model = MLP()
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
# creates a default VeRA adapter
peft_model = get_peft_model(model, config)
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, projection_prng_key=123)
msg = (
r"Vera PRNG initialisation key must be the same for all adapters. Got config.projection_prng_key=123 but "
r"previous config had 0"
)
with pytest.raises(ValueError, match=msg):
peft_model.add_adapter("other", config2)
def test_multiple_adapters_save_load_save_projection_true(self, mlp_same_prng, tmp_path):
# check saving and loading works with multiple adapters and saved projection weights
torch.manual_seed(0)
input = torch.randn(5, 10)
mlp_same_prng.set_adapter("default")
output_default = mlp_same_prng(input)
mlp_same_prng.set_adapter("other")
output_other = mlp_same_prng(input)
# sanity check
assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3)
save_path = tmp_path / "vera"
mlp_same_prng.save_pretrained(save_path)
assert os.path.exists(save_path / "adapter_config.json")
assert os.path.exists(save_path / "other" / "adapter_config.json")
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, save_path)
peft_model.load_adapter(save_path / "other", "other")
peft_model.set_adapter("default")
output_default_loaded = peft_model(input)
peft_model.set_adapter("other")
output_other_loaded = peft_model(input)
assert torch.allclose(output_default, output_default_loaded, atol=1e-3, rtol=1e-3)
assert torch.allclose(output_other, output_other_loaded, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_save_load_save_projection_false(self, mlp, tmp_path):
# check saving and loading works with multiple adapters without saved projection weights
torch.manual_seed(1)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
peft_model.add_adapter("second", config2)
input = torch.randn(5, 10)
peft_model.set_adapter("first")
output_first = peft_model(input)
peft_model.set_adapter("second")
output_second = peft_model(input)
# sanity check
assert not torch.allclose(output_first, output_second, atol=1e-3, rtol=1e-3)
save_path = tmp_path / "vera"
peft_model.save_pretrained(save_path)
assert os.path.exists(save_path / "first" / "adapter_config.json")
assert os.path.exists(save_path / "second" / "adapter_config.json")
torch.manual_seed(0)
mlp = MLP()
peft_model = PeftModel.from_pretrained(mlp, save_path / "first", adapter_name="first")
peft_model.load_adapter(save_path / "second", "second")
peft_model.set_adapter("first")
output_first_loaded = peft_model(input)
peft_model.set_adapter("second")
output_second_loaded = peft_model(input)
assert torch.allclose(output_first, output_first_loaded, atol=1e-3, rtol=1e-3)
assert torch.allclose(output_second, output_second_loaded, atol=1e-3, rtol=1e-3)
def test_multiple_adapters_save_projection_true_contains_vera_A_vera_B(self, mlp_same_prng, tmp_path):
# check that the state_dicts don't contain the projection weights
save_path = tmp_path / "vera"
mlp_same_prng.save_pretrained(save_path)
sd_default = {}
with safe_open(save_path / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_default[key] = f.get_tensor(key)
assert any("vera_A" in key for key in sd_default)
assert any("vera_B" in key for key in sd_default)
# default rank for VeRA is 256
assert sd_default["base_model.vera_A"].shape == (256, 20)
assert sd_default["base_model.vera_B"].shape == (20, 256)
sd_other = {}
with safe_open(save_path / "other" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_other[key] = f.get_tensor(key)
assert any("vera_A" in key for key in sd_other)
assert any("vera_B" in key for key in sd_other)
assert sd_other["base_model.vera_A"].shape == (256, 20)
assert sd_other["base_model.vera_B"].shape == (20, 256)
def test_multiple_adapters_save_projection_false_contains_no_vera_A_vera_B(self, mlp, tmp_path):
torch.manual_seed(1)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
# creates a default VeRA adapter
peft_model = get_peft_model(mlp, config, adapter_name="first")
config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False)
peft_model.add_adapter("second", config2)
save_path = tmp_path / "vera"
peft_model.save_pretrained(save_path)
sd_default = {}
with safe_open(save_path / "first" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_default[key] = f.get_tensor(key)
assert not any("vera_A" in key for key in sd_default)
assert not any("vera_B" in key for key in sd_default)
sd_other = {}
with safe_open(save_path / "second" / "adapter_model.safetensors", framework="pt", device="cpu") as f:
for key in f.keys():
sd_other[key] = f.get_tensor(key)
assert not any("vera_A" in key for key in sd_other)
assert not any("vera_B" in key for key in sd_other)
def test_vera_A_vera_B_share_memory(self, mlp_same_prng):
vera_A = mlp_same_prng.vera_A["default"]
vera_B = mlp_same_prng.vera_B["default"]
# these tensors should share the same data
assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_A["default"].data_ptr()
assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_B["default"].data_ptr()
assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_A["default"].data_ptr()
assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_B["default"].data_ptr()
# sanity check: these tensors shouldn't share the same data
assert vera_A.data_ptr() != vera_B.data_ptr()
def test_vera_lambda_dont_share_memory(self, mlp_same_prng):
# sanity check: these tensors shouldn't share the same data
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_b["default"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_b["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_d["default"].data_ptr()
)
assert (
mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr()
!= mlp_same_prng.base_model.model.lin2.vera_lambda_d["other"].data_ptr()
)
def test_vera_different_shapes(self, mlp):
config = VeraConfig(target_modules=["lin0", "lin3"], init_weights=False)
mlp_different_shapes = get_peft_model(mlp, config)
vera_A = mlp_different_shapes.vera_A["default"]
vera_B = mlp_different_shapes.vera_B["default"]
# sanity check
assert mlp.lin0.base_layer.weight.shape != mlp.lin3.base_layer.weight.shape
# lin0 has the largest output dimension, lin3 has the largest input dimension
# vera_A should have the shape of (rank, largest_in), vera_B should have the shape of (largest_out, rank)
assert vera_A.shape == (config.r, mlp.lin3.in_features)
assert vera_B.shape == (mlp.lin0.out_features, config.r)
# should not raise
input = torch.randn(5, 10)
mlp_different_shapes(input)
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
def test_vera_dtypes(self, dtype):
if dtype == torch.bfloat16:
# skip if bf16 is not supported on hardware, see #1872
is_xpu = infer_device() == "xpu"
is_cuda_bf16 = torch.cuda.is_available() and torch.cuda.is_bf16_supported()
if not (is_xpu or is_cuda_bf16):
pytest.skip("bfloat16 not supported on this system, skipping the test")
model = MLP().to(dtype)
config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False)
peft_model = get_peft_model(model, config)
inputs = torch.randn(5, 10).to(dtype)
output = peft_model(inputs) # should not raise
assert output.dtype == dtype
| peft/tests/test_vera.py/0 | {
"file_path": "peft/tests/test_vera.py",
"repo_id": "peft",
"token_count": 5925
} | 186 |
#!/usr/bin/env python3
""" Model Benchmark Script
An inference and train step benchmark script for timm models.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import csv
import json
import logging
import time
from collections import OrderedDict
from contextlib import suppress
from functools import partial
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import resolve_data_config
from timm.layers import set_fast_norm
from timm.models import create_model, is_model, list_models
from timm.optim import create_optimizer_v2
from timm.utils import setup_default_logging, set_jit_fuser, decay_batch_step, check_batch_size_retry, ParseKwargs,\
reparameterize_model
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
has_fvcore_profiling = True
except ImportError as e:
FlopCountAnalysis = None
has_fvcore_profiling = False
try:
from functorch.compile import memory_efficient_fusion
has_functorch = True
except ImportError as e:
has_functorch = False
has_compile = hasattr(torch, 'compile')
if torch.cuda.is_available():
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch Benchmark')
# benchmark specific args
parser.add_argument('--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument('--bench', default='both', type=str,
help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
parser.add_argument('--detail', action='store_true', default=False,
help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
parser.add_argument('--no-retry', action='store_true', default=False,
help='Do not decay batch size and retry on error.')
parser.add_argument('--results-file', default='', type=str,
help='Output csv file for validation results (summary)')
parser.add_argument('--results-format', default='csv', type=str,
help='Format for results file one of (csv, json) (default: csv).')
parser.add_argument('--num-warm-iter', default=10, type=int,
help='Number of warmup iterations (default: 10)')
parser.add_argument('--num-bench-iter', default=40, type=int,
help='Number of benchmark iterations (default: 40)')
parser.add_argument('--device', default='cuda', type=str,
help="device to run benchmark on")
# common inference / train args
parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='Run inference at train size, not test-input-size if it exists.')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--grad-checkpointing', action='store_true', default=False,
help='Enable gradient checkpointing through model blocks/stages')
parser.add_argument('--amp', action='store_true', default=False,
help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
parser.add_argument('--amp-dtype', default='float16', type=str,
help='lower precision AMP dtype (default: float16). Overrides --precision arg if args.amp True.')
parser.add_argument('--precision', default='float32', type=str,
help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--fast-norm', default=False, action='store_true',
help='enable experimental fast-norm')
parser.add_argument('--reparam', default=False, action='store_true',
help='Reparameterize model')
parser.add_argument('--model-kwargs', nargs='*', default={}, action=ParseKwargs)
# codegen (model compilation) options
scripting_group = parser.add_mutually_exclusive_group()
scripting_group.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
scripting_group.add_argument('--torchcompile', nargs='?', type=str, default=None, const='inductor',
help="Enable compilation w/ specified backend (default: inductor).")
scripting_group.add_argument('--aot-autograd', default=False, action='store_true',
help="Enable AOT Autograd optimization.")
# train optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "sgd"')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# model regularization / loss params that impact model or loss fn
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
def timestamp(sync=False):
return time.perf_counter()
def cuda_timestamp(sync=False, device=None):
if sync:
torch.cuda.synchronize(device=device)
return time.perf_counter()
def count_params(model: nn.Module):
return sum([m.numel() for m in model.parameters()])
def resolve_precision(precision: str):
assert precision in ('amp', 'amp_bfloat16', 'float16', 'bfloat16', 'float32')
amp_dtype = None # amp disabled
model_dtype = torch.float32
data_dtype = torch.float32
if precision == 'amp':
amp_dtype = torch.float16
elif precision == 'amp_bfloat16':
amp_dtype = torch.bfloat16
elif precision == 'float16':
model_dtype = torch.float16
data_dtype = torch.float16
elif precision == 'bfloat16':
model_dtype = torch.bfloat16
data_dtype = torch.bfloat16
return amp_dtype, model_dtype, data_dtype
def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
_, macs, _ = get_model_profile(
model=model,
input_shape=(batch_size,) + input_size, # input shape/resolution
print_profile=detailed, # prints the model graph with the measured profile attached to each module
detailed=detailed, # print the detailed profile
warm_up=10, # the number of warm-ups before measuring the time of each module
as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k)
output_file=None, # path to the output file. If None, the profiler prints to stdout.
ignore_modules=None) # the list of modules to ignore in the profiling
return macs, 0 # no activation count in DS
def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
class BenchmarkRunner:
def __init__(
self,
model_name,
detail=False,
device='cuda',
torchscript=False,
torchcompile=None,
aot_autograd=False,
reparam=False,
precision='float32',
fuser='',
num_warm_iter=10,
num_bench_iter=50,
use_train_size=False,
**kwargs
):
self.model_name = model_name
self.detail = detail
self.device = device
self.amp_dtype, self.model_dtype, self.data_dtype = resolve_precision(precision)
self.channels_last = kwargs.pop('channels_last', False)
if self.amp_dtype is not None:
self.amp_autocast = partial(torch.cuda.amp.autocast, dtype=self.amp_dtype)
else:
self.amp_autocast = suppress
if fuser:
set_jit_fuser(fuser)
self.model = create_model(
model_name,
num_classes=kwargs.pop('num_classes', None),
in_chans=3,
global_pool=kwargs.pop('gp', 'fast'),
scriptable=torchscript,
drop_rate=kwargs.pop('drop', 0.),
drop_path_rate=kwargs.pop('drop_path', None),
drop_block_rate=kwargs.pop('drop_block', None),
**kwargs.pop('model_kwargs', {}),
)
if reparam:
self.model = reparameterize_model(self.model)
self.model.to(
device=self.device,
dtype=self.model_dtype,
memory_format=torch.channels_last if self.channels_last else None,
)
self.num_classes = self.model.num_classes
self.param_count = count_params(self.model)
_logger.info('Model %s created, param count: %d' % (model_name, self.param_count))
data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size)
self.input_size = data_config['input_size']
self.batch_size = kwargs.pop('batch_size', 256)
self.compiled = False
if torchscript:
self.model = torch.jit.script(self.model)
self.compiled = True
elif torchcompile:
assert has_compile, 'A version of torch w/ torch.compile() is required, possibly a nightly.'
torch._dynamo.reset()
self.model = torch.compile(self.model, backend=torchcompile)
self.compiled = True
elif aot_autograd:
assert has_functorch, "functorch is needed for --aot-autograd"
self.model = memory_efficient_fusion(self.model)
self.compiled = True
self.example_inputs = None
self.num_warm_iter = num_warm_iter
self.num_bench_iter = num_bench_iter
self.log_freq = num_bench_iter // 5
if 'cuda' in self.device:
self.time_fn = partial(cuda_timestamp, device=self.device)
else:
self.time_fn = timestamp
def _init_input(self):
self.example_inputs = torch.randn(
(self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype)
if self.channels_last:
self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
class InferenceBenchmarkRunner(BenchmarkRunner):
def __init__(
self,
model_name,
device='cuda',
torchscript=False,
**kwargs
):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.eval()
def run(self):
def _step():
t_step_start = self.time_fn()
with self.amp_autocast():
output = self.model(self.example_inputs)
t_step_end = self.time_fn(True)
return t_step_end - t_step_start
_logger.info(
f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
with torch.no_grad():
self._init_input()
for _ in range(self.num_warm_iter):
_step()
total_step = 0.
num_samples = 0
t_run_start = self.time_fn()
for i in range(self.num_bench_iter):
delta_fwd = _step()
total_step += delta_fwd
num_samples += self.batch_size
num_steps = i + 1
if num_steps % self.log_freq == 0:
_logger.info(
f"Infer [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_end = self.time_fn(True)
t_run_elapsed = t_run_end - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
retries = 0 if self.compiled else 2 # skip profiling if model is scripted
while retries:
retries -= 1
try:
if has_deepspeed_profiling:
macs, _ = profile_deepspeed(self.model, self.input_size)
results['gmacs'] = round(macs / 1e9, 2)
elif has_fvcore_profiling:
macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries)
results['gmacs'] = round(macs / 1e9, 2)
results['macts'] = round(activations / 1e6, 2)
except RuntimeError as e:
pass
_logger.info(
f"Inference benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")
return results
class TrainBenchmarkRunner(BenchmarkRunner):
def __init__(
self,
model_name,
device='cuda',
torchscript=False,
**kwargs
):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.train()
self.loss = nn.CrossEntropyLoss().to(self.device)
self.target_shape = tuple()
self.optimizer = create_optimizer_v2(
self.model,
opt=kwargs.pop('opt', 'sgd'),
lr=kwargs.pop('lr', 1e-4))
if kwargs.pop('grad_checkpointing', False):
self.model.set_grad_checkpointing()
def _gen_target(self, batch_size):
return torch.empty(
(batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)
def run(self):
def _step(detail=False):
self.optimizer.zero_grad() # can this be ignored?
t_start = self.time_fn()
t_fwd_end = t_start
t_bwd_end = t_start
with self.amp_autocast():
output = self.model(self.example_inputs)
if isinstance(output, tuple):
output = output[0]
if detail:
t_fwd_end = self.time_fn(True)
target = self._gen_target(output.shape[0])
self.loss(output, target).backward()
if detail:
t_bwd_end = self.time_fn(True)
self.optimizer.step()
t_end = self.time_fn(True)
if detail:
delta_fwd = t_fwd_end - t_start
delta_bwd = t_bwd_end - t_fwd_end
delta_opt = t_end - t_bwd_end
return delta_fwd, delta_bwd, delta_opt
else:
delta_step = t_end - t_start
return delta_step
_logger.info(
f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
self._init_input()
for _ in range(self.num_warm_iter):
_step()
t_run_start = self.time_fn()
if self.detail:
total_fwd = 0.
total_bwd = 0.
total_opt = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_fwd, delta_bwd, delta_opt = _step(True)
num_samples += self.batch_size
total_fwd += delta_fwd
total_bwd += delta_bwd
total_opt += delta_opt
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
total_step = total_fwd + total_bwd + total_opt
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd,"
f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd,"
f" {1000 * total_opt / num_steps:0.3f} ms/step opt."
)
total_step = total_fwd + total_bwd + total_opt
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
else:
total_step = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_step = _step(False)
num_samples += self.batch_size
total_step += delta_step
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Train benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample")
return results
class ProfileRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', profiler='', **kwargs):
super().__init__(model_name=model_name, device=device, **kwargs)
if not profiler:
if has_deepspeed_profiling:
profiler = 'deepspeed'
elif has_fvcore_profiling:
profiler = 'fvcore'
assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work."
self.profiler = profiler
self.model.eval()
def run(self):
_logger.info(
f'Running profiler on {self.model_name} w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
macs = 0
activations = 0
if self.profiler == 'deepspeed':
macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
elif self.profiler == 'fvcore':
macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
results = dict(
gmacs=round(macs / 1e9, 2),
macts=round(activations / 1e6, 2),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Profile of {self.model_name} done. "
f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.")
return results
def _try_run(
model_name,
bench_fn,
bench_kwargs,
initial_batch_size,
no_batch_size_retry=False
):
batch_size = initial_batch_size
results = dict()
error_str = 'Unknown'
while batch_size:
try:
torch.cuda.empty_cache()
bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
results = bench.run()
return results
except RuntimeError as e:
error_str = str(e)
_logger.error(f'"{error_str}" while running benchmark.')
if not check_batch_size_retry(error_str):
_logger.error(f'Unrecoverable error encountered while benchmarking {model_name}, skipping.')
break
if no_batch_size_retry:
break
batch_size = decay_batch_step(batch_size)
_logger.warning(f'Reducing batch size to {batch_size} for retry.')
results['error'] = error_str
return results
def benchmark(args):
if args.amp:
_logger.warning("Overriding precision to 'amp' since --amp flag set.")
args.precision = 'amp' if args.amp_dtype == 'float16' else '_'.join(['amp', args.amp_dtype])
_logger.info(f'Benchmarking in {args.precision} precision. '
f'{"NHWC" if args.channels_last else "NCHW"} layout. '
f'torchscript {"enabled" if args.torchscript else "disabled"}')
bench_kwargs = vars(args).copy()
bench_kwargs.pop('amp')
model = bench_kwargs.pop('model')
batch_size = bench_kwargs.pop('batch_size')
bench_fns = (InferenceBenchmarkRunner,)
prefixes = ('infer',)
if args.bench == 'both':
bench_fns = (
InferenceBenchmarkRunner,
TrainBenchmarkRunner
)
prefixes = ('infer', 'train')
elif args.bench == 'train':
bench_fns = TrainBenchmarkRunner,
prefixes = 'train',
elif args.bench.startswith('profile'):
# specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore
if 'deepspeed' in args.bench:
assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter"
bench_kwargs['profiler'] = 'deepspeed'
elif 'fvcore' in args.bench:
assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter"
bench_kwargs['profiler'] = 'fvcore'
bench_fns = ProfileRunner,
batch_size = 1
model_results = OrderedDict(model=model)
for prefix, bench_fn in zip(prefixes, bench_fns):
run_results = _try_run(
model,
bench_fn,
bench_kwargs=bench_kwargs,
initial_batch_size=batch_size,
no_batch_size_retry=args.no_retry,
)
if prefix and 'error' not in run_results:
run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
model_results.update(run_results)
if 'error' in run_results:
break
if 'error' not in model_results:
param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0))
model_results.setdefault('param_count', param_count)
model_results.pop('train_param_count', 0)
return model_results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if args.fast_norm:
set_fast_norm()
if args.model_list:
args.model = ''
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names]
elif args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
model_cfgs = [(n, None) for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, None) for n in model_names]
if len(model_cfgs):
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
for m, _ in model_cfgs:
if not m:
continue
args.model = m
r = benchmark(args)
if r:
results.append(r)
time.sleep(10)
except KeyboardInterrupt as e:
pass
sort_key = 'infer_samples_per_sec'
if 'train' in args.bench:
sort_key = 'train_samples_per_sec'
elif 'profile' in args.bench:
sort_key = 'infer_gmacs'
results = filter(lambda x: sort_key in x, results)
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
else:
results = benchmark(args)
if args.results_file:
write_results(args.results_file, results, format=args.results_format)
# output results in JSON to stdout w/ delimiter for runner script
print(f'--result\n{json.dumps(results, indent=4)}')
def write_results(results_file, results, format='csv'):
with open(results_file, mode='w') as cf:
if format == 'json':
json.dump(results, cf, indent=4)
else:
if not isinstance(results, (list, tuple)):
results = [results]
if not results:
return
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| pytorch-image-models/benchmark.py/0 | {
"file_path": "pytorch-image-models/benchmark.py",
"repo_id": "pytorch-image-models",
"token_count": 13272
} | 187 |
# Big Transfer (BiT)
**Big Transfer (BiT)** is a type of pretraining recipe that pre-trains on a large supervised source dataset, and fine-tunes the weights on the target task. Models are trained on the JFT-300M dataset. The finetuned models contained in this collection are finetuned on ImageNet.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `resnetv2_101x1_bitm`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('resnetv2_101x1_bitm', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{kolesnikov2020big,
title={Big Transfer (BiT): General Visual Representation Learning},
author={Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Joan Puigcerver and Jessica Yung and Sylvain Gelly and Neil Houlsby},
year={2020},
eprint={1912.11370},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Big Transfer
Paper:
Title: 'Big Transfer (BiT): General Visual Representation Learning'
URL: https://paperswithcode.com/paper/large-scale-learning-of-general-visual
Models:
- Name: resnetv2_101x1_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 5330896
Parameters: 44540000
File Size: 178256468
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPUv3-512
ID: resnetv2_101x1_bitm
LR: 0.03
Epochs: 90
Layers: 101
Crop Pct: '1.0'
Momentum: 0.9
Batch Size: 4096
Image Size: '480'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L444
Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x1-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.21%
Top 5 Accuracy: 96.47%
- Name: resnetv2_101x3_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 15988688
Parameters: 387930000
File Size: 1551830100
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPUv3-512
ID: resnetv2_101x3_bitm
LR: 0.03
Epochs: 90
Layers: 101
Crop Pct: '1.0'
Momentum: 0.9
Batch Size: 4096
Image Size: '480'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L451
Weights: https://storage.googleapis.com/bit_models/BiT-M-R101x3-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.38%
Top 5 Accuracy: 97.37%
- Name: resnetv2_152x2_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 10659792
Parameters: 236340000
File Size: 945476668
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
ID: resnetv2_152x2_bitm
Crop Pct: '1.0'
Image Size: '480'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L458
Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x2-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.4%
Top 5 Accuracy: 97.43%
- Name: resnetv2_152x4_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 21317584
Parameters: 936530000
File Size: 3746270104
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPUv3-512
ID: resnetv2_152x4_bitm
Crop Pct: '1.0'
Image Size: '480'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L465
Weights: https://storage.googleapis.com/bit_models/BiT-M-R152x4-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.95%
Top 5 Accuracy: 97.45%
- Name: resnetv2_50x1_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 5330896
Parameters: 25550000
File Size: 102242668
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPUv3-512
ID: resnetv2_50x1_bitm
LR: 0.03
Epochs: 90
Layers: 50
Crop Pct: '1.0'
Momentum: 0.9
Batch Size: 4096
Image Size: '480'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L430
Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x1-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.19%
Top 5 Accuracy: 95.63%
- Name: resnetv2_50x3_bitm
In Collection: Big Transfer
Metadata:
FLOPs: 15988688
Parameters: 217320000
File Size: 869321580
Architecture:
- 1x1 Convolution
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Group Normalization
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Weight Standardization
Tasks:
- Image Classification
Training Techniques:
- Mixup
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPUv3-512
ID: resnetv2_50x3_bitm
LR: 0.03
Epochs: 90
Layers: 50
Crop Pct: '1.0'
Momentum: 0.9
Batch Size: 4096
Image Size: '480'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/resnetv2.py#L437
Weights: https://storage.googleapis.com/bit_models/BiT-M-R50x3-ILSVRC2012.npz
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.75%
Top 5 Accuracy: 97.12%
--> | pytorch-image-models/hfdocs/source/models/big-transfer.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/big-transfer.mdx",
"repo_id": "pytorch-image-models",
"token_count": 4101
} | 188 |
# Noisy Student (EfficientNet)
**Noisy Student Training** is a semi-supervised learning approach. It extends the idea of self-training
and distillation with the use of equal-or-larger student models and noise added to the student during learning. It has three main steps:
1. train a teacher model on labeled images
2. use the teacher to generate pseudo labels on unlabeled images
3. train a student model on the combination of labeled images and pseudo labeled images.
The algorithm is iterated a few times by treating the student as a teacher to relabel the unlabeled data and training a new student.
Noisy Student Training seeks to improve on self-training and distillation in two ways. First, it makes the student larger than, or at least equal to, the teacher so the student can better learn from a larger dataset. Second, it adds noise to the student so the noised student is forced to learn harder from the pseudo labels. To noise the student, it uses input noise such as RandAugment data augmentation, and model noise such as dropout and stochastic depth during training.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ns`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_efficientnet_b0_ns', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{xie2020selftraining,
title={Self-training with Noisy Student improves ImageNet classification},
author={Qizhe Xie and Minh-Thang Luong and Eduard Hovy and Quoc V. Le},
year={2020},
eprint={1911.04252},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: Noisy Student
Paper:
Title: Self-training with Noisy Student improves ImageNet classification
URL: https://paperswithcode.com/paper/self-training-with-noisy-student-improves
Models:
- Name: tf_efficientnet_b0_ns
In Collection: Noisy Student
Metadata:
FLOPs: 488688572
Parameters: 5290000
File Size: 21386709
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b0_ns
LR: 0.128
Epochs: 700
Dropout: 0.5
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1427
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.66%
Top 5 Accuracy: 94.37%
- Name: tf_efficientnet_b1_ns
In Collection: Noisy Student
Metadata:
FLOPs: 883633200
Parameters: 7790000
File Size: 31516408
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b1_ns
LR: 0.128
Epochs: 700
Dropout: 0.5
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1437
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.39%
Top 5 Accuracy: 95.74%
- Name: tf_efficientnet_b2_ns
In Collection: Noisy Student
Metadata:
FLOPs: 1234321170
Parameters: 9110000
File Size: 36801803
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b2_ns
LR: 0.128
Epochs: 700
Dropout: 0.5
Crop Pct: '0.89'
Momentum: 0.9
Batch Size: 2048
Image Size: '260'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1447
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.39%
Top 5 Accuracy: 96.24%
- Name: tf_efficientnet_b3_ns
In Collection: Noisy Student
Metadata:
FLOPs: 2275247568
Parameters: 12230000
File Size: 49385734
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b3_ns
LR: 0.128
Epochs: 700
Dropout: 0.5
Crop Pct: '0.904'
Momentum: 0.9
Batch Size: 2048
Image Size: '300'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1457
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.04%
Top 5 Accuracy: 96.91%
- Name: tf_efficientnet_b4_ns
In Collection: Noisy Student
Metadata:
FLOPs: 5749638672
Parameters: 19340000
File Size: 77995057
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b4_ns
LR: 0.128
Epochs: 700
Dropout: 0.5
Crop Pct: '0.922'
Momentum: 0.9
Batch Size: 2048
Image Size: '380'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1467
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.15%
Top 5 Accuracy: 97.47%
- Name: tf_efficientnet_b5_ns
In Collection: Noisy Student
Metadata:
FLOPs: 13176501888
Parameters: 30390000
File Size: 122404944
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b5_ns
LR: 0.128
Epochs: 350
Dropout: 0.5
Crop Pct: '0.934'
Momentum: 0.9
Batch Size: 2048
Image Size: '456'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1477
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 86.08%
Top 5 Accuracy: 97.75%
- Name: tf_efficientnet_b6_ns
In Collection: Noisy Student
Metadata:
FLOPs: 24180518488
Parameters: 43040000
File Size: 173239537
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b6_ns
LR: 0.128
Epochs: 350
Dropout: 0.5
Crop Pct: '0.942'
Momentum: 0.9
Batch Size: 2048
Image Size: '528'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1487
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 86.45%
Top 5 Accuracy: 97.88%
- Name: tf_efficientnet_b7_ns
In Collection: Noisy Student
Metadata:
FLOPs: 48205304880
Parameters: 66349999
File Size: 266853140
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
ID: tf_efficientnet_b7_ns
LR: 0.128
Epochs: 350
Dropout: 0.5
Crop Pct: '0.949'
Momentum: 0.9
Batch Size: 2048
Image Size: '600'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1498
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 86.83%
Top 5 Accuracy: 98.08%
- Name: tf_efficientnet_l2_ns
In Collection: Noisy Student
Metadata:
FLOPs: 611646113804
Parameters: 480310000
File Size: 1925950424
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- FixRes
- Label Smoothing
- Noisy Student
- RMSProp
- RandAugment
- Weight Decay
Training Data:
- ImageNet
- JFT-300M
Training Resources: Cloud TPU v3 Pod
Training Time: 6 days
ID: tf_efficientnet_l2_ns
LR: 0.128
Epochs: 350
Dropout: 0.5
Crop Pct: '0.96'
Momentum: 0.9
Batch Size: 2048
Image Size: '800'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Stochastic Depth Survival: 0.8
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1520
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 88.35%
Top 5 Accuracy: 98.66%
--> | pytorch-image-models/hfdocs/source/models/noisy-student.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/noisy-student.mdx",
"repo_id": "pytorch-image-models",
"token_count": 6683
} | 189 |
# SPNASNet
**Single-Path NAS** is a novel differentiable NAS method for designing hardware-efficient ConvNets in less than 4 hours.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('spnasnet_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `spnasnet_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('spnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{stamoulis2019singlepath,
title={Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours},
author={Dimitrios Stamoulis and Ruizhou Ding and Di Wang and Dimitrios Lymberopoulos and Bodhi Priyantha and Jie Liu and Diana Marculescu},
year={2019},
eprint={1904.02877},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: SPNASNet
Paper:
Title: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4
Hours'
URL: https://paperswithcode.com/paper/single-path-nas-designing-hardware-efficient
Models:
- Name: spnasnet_100
In Collection: SPNASNet
Metadata:
FLOPs: 442385600
Parameters: 4420000
File Size: 17902337
Architecture:
- Average Pooling
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- ReLU
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: spnasnet_100
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L995
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.08%
Top 5 Accuracy: 91.82%
--> | pytorch-image-models/hfdocs/source/models/spnasnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/spnasnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1508
} | 190 |
# Optimization
This page contains the API reference documentation for learning rate optimizers included in `timm`.
## Optimizers
### Factory functions
[[autodoc]] timm.optim.optim_factory.create_optimizer
[[autodoc]] timm.optim.optim_factory.create_optimizer_v2
### Optimizer Classes
[[autodoc]] timm.optim.adabelief.AdaBelief
[[autodoc]] timm.optim.adafactor.Adafactor
[[autodoc]] timm.optim.adahessian.Adahessian
[[autodoc]] timm.optim.adamp.AdamP
[[autodoc]] timm.optim.adamw.AdamW
[[autodoc]] timm.optim.lamb.Lamb
[[autodoc]] timm.optim.lars.Lars
[[autodoc]] timm.optim.lookahead.Lookahead
[[autodoc]] timm.optim.madgrad.MADGRAD
[[autodoc]] timm.optim.nadam.Nadam
[[autodoc]] timm.optim.nvnovograd.NvNovoGrad
[[autodoc]] timm.optim.radam.RAdam
[[autodoc]] timm.optim.rmsprop_tf.RMSpropTF
[[autodoc]] timm.optim.sgdp.SGDP
| pytorch-image-models/hfdocs/source/reference/optimizers.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/reference/optimizers.mdx",
"repo_id": "pytorch-image-models",
"token_count": 333
} | 191 |
import logging
from .constants import *
_logger = logging.getLogger(__name__)
def resolve_data_config(
args=None,
pretrained_cfg=None,
model=None,
use_test_size=False,
verbose=False
):
assert model or args or pretrained_cfg, "At least one of model, args, or pretrained_cfg required for data config."
args = args or {}
pretrained_cfg = pretrained_cfg or {}
if not pretrained_cfg and model is not None and hasattr(model, 'pretrained_cfg'):
pretrained_cfg = model.pretrained_cfg
data_config = {}
# Resolve input/image size
in_chans = 3
if args.get('in_chans', None) is not None:
in_chans = args['in_chans']
elif args.get('chans', None) is not None:
in_chans = args['chans']
input_size = (in_chans, 224, 224)
if args.get('input_size', None) is not None:
assert isinstance(args['input_size'], (tuple, list))
assert len(args['input_size']) == 3
input_size = tuple(args['input_size'])
in_chans = input_size[0] # input_size overrides in_chans
elif args.get('img_size', None) is not None:
assert isinstance(args['img_size'], int)
input_size = (in_chans, args['img_size'], args['img_size'])
else:
if use_test_size and pretrained_cfg.get('test_input_size', None) is not None:
input_size = pretrained_cfg['test_input_size']
elif pretrained_cfg.get('input_size', None) is not None:
input_size = pretrained_cfg['input_size']
data_config['input_size'] = input_size
# resolve interpolation method
data_config['interpolation'] = 'bicubic'
if args.get('interpolation', None):
data_config['interpolation'] = args['interpolation']
elif pretrained_cfg.get('interpolation', None):
data_config['interpolation'] = pretrained_cfg['interpolation']
# resolve dataset + model mean for normalization
data_config['mean'] = IMAGENET_DEFAULT_MEAN
if args.get('mean', None) is not None:
mean = tuple(args['mean'])
if len(mean) == 1:
mean = tuple(list(mean) * in_chans)
else:
assert len(mean) == in_chans
data_config['mean'] = mean
elif pretrained_cfg.get('mean', None):
data_config['mean'] = pretrained_cfg['mean']
# resolve dataset + model std deviation for normalization
data_config['std'] = IMAGENET_DEFAULT_STD
if args.get('std', None) is not None:
std = tuple(args['std'])
if len(std) == 1:
std = tuple(list(std) * in_chans)
else:
assert len(std) == in_chans
data_config['std'] = std
elif pretrained_cfg.get('std', None):
data_config['std'] = pretrained_cfg['std']
# resolve default inference crop
crop_pct = DEFAULT_CROP_PCT
if args.get('crop_pct', None):
crop_pct = args['crop_pct']
else:
if use_test_size and pretrained_cfg.get('test_crop_pct', None):
crop_pct = pretrained_cfg['test_crop_pct']
elif pretrained_cfg.get('crop_pct', None):
crop_pct = pretrained_cfg['crop_pct']
data_config['crop_pct'] = crop_pct
# resolve default crop percentage
crop_mode = DEFAULT_CROP_MODE
if args.get('crop_mode', None):
crop_mode = args['crop_mode']
elif pretrained_cfg.get('crop_mode', None):
crop_mode = pretrained_cfg['crop_mode']
data_config['crop_mode'] = crop_mode
if verbose:
_logger.info('Data processing configuration for current model + dataset:')
for n, v in data_config.items():
_logger.info('\t%s: %s' % (n, str(v)))
return data_config
def resolve_model_data_config(
model,
args=None,
pretrained_cfg=None,
use_test_size=False,
verbose=False,
):
""" Resolve Model Data Config
This is equivalent to resolve_data_config() but with arguments re-ordered to put model first.
Args:
model (nn.Module): the model instance
args (dict): command line arguments / configuration in dict form (overrides pretrained_cfg)
pretrained_cfg (dict): pretrained model config (overrides pretrained_cfg attached to model)
use_test_size (bool): use the test time input resolution (if one exists) instead of default train resolution
verbose (bool): enable extra logging of resolved values
Returns:
dictionary of config
"""
return resolve_data_config(
args=args,
pretrained_cfg=pretrained_cfg,
model=model,
use_test_size=use_test_size,
verbose=verbose,
)
| pytorch-image-models/timm/data/config.py/0 | {
"file_path": "pytorch-image-models/timm/data/config.py",
"repo_id": "pytorch-image-models",
"token_count": 1927
} | 192 |
""" Dataset reader for HF IterableDataset
"""
import math
import os
from itertools import repeat, chain
from typing import Optional
import torch
import torch.distributed as dist
from PIL import Image
try:
import datasets
from datasets.distributed import split_dataset_by_node
from datasets.splits import SplitInfo
except ImportError as e:
print("Please install Hugging Face datasets package `pip install datasets`.")
raise e
from .class_map import load_class_map
from .reader import Reader
from .shared_count import SharedCount
SHUFFLE_SIZE = int(os.environ.get('HFIDS_SHUFFLE_SIZE', 4096))
class ReaderHfids(Reader):
def __init__(
self,
name: str,
root: Optional[str] = None,
split: str = 'train',
is_training: bool = False,
batch_size: int = 1,
download: bool = False,
repeats: int = 0,
seed: int = 42,
class_map: Optional[dict] = None,
input_key: str = 'image',
input_img_mode: str = 'RGB',
target_key: str = 'label',
target_img_mode: str = '',
shuffle_size: Optional[int] = None,
num_samples: Optional[int] = None,
):
super().__init__()
self.root = root
self.split = split
self.is_training = is_training
self.batch_size = batch_size
self.download = download
self.repeats = repeats
self.common_seed = seed # a seed that's fixed across all worker / distributed instances
self.shuffle_size = shuffle_size or SHUFFLE_SIZE
self.input_key = input_key
self.input_img_mode = input_img_mode
self.target_key = target_key
self.target_img_mode = target_img_mode
self.builder = datasets.load_dataset_builder(name, cache_dir=root)
if download:
self.builder.download_and_prepare()
split_info: Optional[SplitInfo] = None
if self.builder.info.splits and split in self.builder.info.splits:
if isinstance(self.builder.info.splits[split], SplitInfo):
split_info: Optional[SplitInfo] = self.builder.info.splits[split]
if num_samples:
self.num_samples = num_samples
elif split_info and split_info.num_examples:
self.num_samples = split_info.num_examples
else:
raise ValueError(
"Dataset length is unknown, please pass `num_samples` explicitely. "
"The number of steps needs to be known in advance for the learning rate scheduler."
)
self.remap_class = False
if class_map:
self.class_to_idx = load_class_map(class_map)
self.remap_class = True
else:
self.class_to_idx = {}
# Distributed world state
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
# Attributes that are updated in _lazy_init
self.worker_info = None
self.worker_id = 0
self.num_workers = 1
self.global_worker_id = 0
self.global_num_workers = 1
# Initialized lazily on each dataloader worker process
self.ds: Optional[datasets.IterableDataset] = None
self.epoch = SharedCount()
def set_epoch(self, count):
# to update the shuffling effective_seed = seed + epoch
self.epoch.value = count
def set_loader_cfg(
self,
num_workers: Optional[int] = None,
):
if self.ds is not None:
return
if num_workers is not None:
self.num_workers = num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
def _lazy_init(self):
""" Lazily initialize worker (in worker processes)
"""
if self.worker_info is None:
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
self.worker_info = worker_info
self.worker_id = worker_info.id
self.num_workers = worker_info.num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id
if self.download:
dataset = self.builder.as_dataset(split=self.split)
# to distribute evenly to workers
ds = dataset.to_iterable_dataset(num_shards=self.global_num_workers)
else:
# in this case the number of shard is determined by the number of remote files
ds = self.builder.as_streaming_dataset(split=self.split)
if self.is_training:
# will shuffle the list of shards and use a shuffle buffer
ds = ds.shuffle(seed=self.common_seed, buffer_size=self.shuffle_size)
# Distributed:
# The dataset has a number of shards that is a factor of `dist_num_replicas` (i.e. if `ds.n_shards % dist_num_replicas == 0`),
# so the shards are evenly assigned across the nodes.
# If it's not the case for dataset streaming, each node keeps 1 example out of `dist_num_replicas`, skipping the other examples.
# Workers:
# In a node, datasets.IterableDataset assigns the shards assigned to the node as evenly as possible to workers.
self.ds = split_dataset_by_node(ds, rank=self.dist_rank, world_size=self.dist_num_replicas)
def _num_samples_per_worker(self):
num_worker_samples = \
max(1, self.repeats) * self.num_samples / max(self.global_num_workers, self.dist_num_replicas)
if self.is_training or self.dist_num_replicas > 1:
num_worker_samples = math.ceil(num_worker_samples)
if self.is_training and self.batch_size is not None:
num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size
return int(num_worker_samples)
def __iter__(self):
if self.ds is None:
self._lazy_init()
self.ds.set_epoch(self.epoch.value)
target_sample_count = self._num_samples_per_worker()
sample_count = 0
if self.is_training:
ds_iter = chain.from_iterable(repeat(self.ds))
else:
ds_iter = iter(self.ds)
for sample in ds_iter:
input_data: Image.Image = sample[self.input_key]
if self.input_img_mode and input_data.mode != self.input_img_mode:
input_data = input_data.convert(self.input_img_mode)
target_data = sample[self.target_key]
if self.target_img_mode:
assert isinstance(target_data, Image.Image), "target_img_mode is specified but target is not an image"
if target_data.mode != self.target_img_mode:
target_data = target_data.convert(self.target_img_mode)
elif self.remap_class:
target_data = self.class_to_idx[target_data]
yield input_data, target_data
sample_count += 1
if self.is_training and sample_count >= target_sample_count:
break
def __len__(self):
num_samples = self._num_samples_per_worker() * self.num_workers
return num_samples
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to examples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if 'file_name' in sample:
name = sample['file_name']
elif 'filename' in sample:
name = sample['filename']
elif 'id' in sample:
name = sample['id']
elif 'image_id' in sample:
name = sample['image_id']
else:
assert False, "No supported name field present"
names.append(name)
return names | pytorch-image-models/timm/data/readers/reader_hfids.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_hfids.py",
"repo_id": "pytorch-image-models",
"token_count": 3722
} | 193 |
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from .config import use_fused_attn
from .mlp import Mlp
from .weight_init import trunc_normal_tf_
class AttentionPoolLatent(nn.Module):
""" Attention pooling w/ latent query
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
in_features: int,
out_features: int = None,
embed_dim: int = None,
num_heads: int = 8,
feat_size: Optional[int] = None,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
qk_norm: bool = False,
latent_len: int = 1,
latent_dim: int = None,
pos_embed: str = '',
pool_type: str = 'token',
norm_layer: Optional[nn.Module] = None,
drop: float = 0.0,
):
super().__init__()
embed_dim = embed_dim or in_features
out_features = out_features or in_features
assert embed_dim % num_heads == 0
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.feat_size = feat_size
self.scale = self.head_dim ** -0.5
self.pool = pool_type
self.fused_attn = use_fused_attn()
if pos_embed == 'abs':
assert feat_size is not None
self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features))
else:
self.pos_embed = None
self.latent_dim = latent_dim or embed_dim
self.latent_len = latent_len
self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim))
self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.proj = nn.Linear(embed_dim, embed_dim)
self.proj_drop = nn.Dropout(drop)
self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity()
self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio))
self.init_weights()
def init_weights(self):
if self.pos_embed is not None:
trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5)
def forward(self, x):
B, N, C = x.shape
if self.pos_embed is not None:
# FIXME interpolate
x = x + self.pos_embed.unsqueeze(0).to(x.dtype)
q_latent = self.latent.expand(B, -1, -1)
q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2)
kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q, k = self.q_norm(q), self.k_norm(k)
if self.fused_attn:
x = F.scaled_dot_product_attention(q, k, v)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
x = attn @ v
x = x.transpose(1, 2).reshape(B, self.latent_len, C)
x = self.proj(x)
x = self.proj_drop(x)
x = x + self.mlp(self.norm(x))
# optional pool if latent seq_len > 1 and pooled output is desired
if self.pool == 'token':
x = x[:, 0]
elif self.pool == 'avg':
x = x.mean(1)
return x | pytorch-image-models/timm/layers/attention_pool.py/0 | {
"file_path": "pytorch-image-models/timm/layers/attention_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 1795
} | 194 |
"""
ECA module from ECAnet
paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151
Original ECA model borrowed from https://github.com/BangguWu/ECANet
Modified circular ECA implementation and adaption for use in timm package
by Chris Ha https://github.com/VRandme
Original License:
MIT License
Copyright (c) 2019 BangguWu, Qilong Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
from torch import nn
import torch.nn.functional as F
from .create_act import create_act_layer
from .helpers import make_divisible
class EcaModule(nn.Module):
"""Constructs an ECA module.
Args:
channels: Number of channels of the input feature map for use in adaptive kernel sizes
for actual calculations according to channel.
gamma, beta: when channel is given parameters of mapping function
refer to original paper https://arxiv.org/pdf/1910.03151.pdf
(default=None. if channel size not given, use k_size given for kernel size.)
kernel_size: Adaptive selection of kernel size (default=3)
gamm: used in kernel_size calc, see above
beta: used in kernel_size calc, see above
act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
gate_layer: gating non-linearity to use
"""
def __init__(
self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
super(EcaModule, self).__init__()
if channels is not None:
t = int(abs(math.log(channels, 2) + beta) / gamma)
kernel_size = max(t if t % 2 else t + 1, 3)
assert kernel_size % 2 == 1
padding = (kernel_size - 1) // 2
if use_mlp:
# NOTE 'mlp' mode is a timm experiment, not in paper
assert channels is not None
if rd_channels is None:
rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
act_layer = act_layer or nn.ReLU
self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
self.act = create_act_layer(act_layer)
self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
else:
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
self.act = None
self.conv2 = None
self.gate = create_act_layer(gate_layer)
def forward(self, x):
y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv
y = self.conv(y)
if self.conv2 is not None:
y = self.act(y)
y = self.conv2(y)
y = self.gate(y).view(x.shape[0], -1, 1, 1)
return x * y.expand_as(x)
EfficientChannelAttn = EcaModule # alias
class CecaModule(nn.Module):
"""Constructs a circular ECA module.
ECA module where the conv uses circular padding rather than zero padding.
Unlike the spatial dimension, the channels do not have inherent ordering nor
locality. Although this module in essence, applies such an assumption, it is unnecessary
to limit the channels on either "edge" from being circularly adapted to each other.
This will fundamentally increase connectivity and possibly increase performance metrics
(accuracy, robustness), without significantly impacting resource metrics
(parameter size, throughput,latency, etc)
Args:
channels: Number of channels of the input feature map for use in adaptive kernel sizes
for actual calculations according to channel.
gamma, beta: when channel is given parameters of mapping function
refer to original paper https://arxiv.org/pdf/1910.03151.pdf
(default=None. if channel size not given, use k_size given for kernel size.)
kernel_size: Adaptive selection of kernel size (default=3)
gamm: used in kernel_size calc, see above
beta: used in kernel_size calc, see above
act_layer: optional non-linearity after conv, enables conv bias, this is an experiment
gate_layer: gating non-linearity to use
"""
def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
super(CecaModule, self).__init__()
if channels is not None:
t = int(abs(math.log(channels, 2) + beta) / gamma)
kernel_size = max(t if t % 2 else t + 1, 3)
has_act = act_layer is not None
assert kernel_size % 2 == 1
# PyTorch circular padding mode is buggy as of pytorch 1.4
# see https://github.com/pytorch/pytorch/pull/17240
# implement manual circular padding
self.padding = (kernel_size - 1) // 2
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
y = x.mean((2, 3)).view(x.shape[0], 1, -1)
# Manually implement circular padding, F.pad does not seemed to be bugged
y = F.pad(y, (self.padding, self.padding), mode='circular')
y = self.conv(y)
y = self.gate(y).view(x.shape[0], -1, 1, 1)
return x * y.expand_as(x)
CircularEfficientChannelAttn = CecaModule
| pytorch-image-models/timm/layers/eca.py/0 | {
"file_path": "pytorch-image-models/timm/layers/eca.py",
"repo_id": "pytorch-image-models",
"token_count": 2411
} | 195 |
""" Linear layer (alternate definition)
"""
import torch
import torch.nn.functional as F
from torch import nn as nn
class Linear(nn.Linear):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting
weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
if torch.jit.is_scripting():
bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
else:
return F.linear(input, self.weight, self.bias)
| pytorch-image-models/timm/layers/linear.py/0 | {
"file_path": "pytorch-image-models/timm/layers/linear.py",
"repo_id": "pytorch-image-models",
"token_count": 282
} | 196 |
""" Depthwise Separable Conv Modules
Basic DWS convs. Other variations of DWS exist with batch norm or activations between the
DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception.
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from .create_conv2d import create_conv2d
from .create_norm_act import get_norm_act_layer
class SeparableConvNormAct(nn.Module):
""" Separable Conv w/ trailing Norm and Activation
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU,
apply_act=True, drop_layer=None):
super(SeparableConvNormAct, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {}
self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
x = self.bn(x)
return x
SeparableConvBnAct = SeparableConvNormAct
class SeparableConv2d(nn.Module):
""" Separable Conv
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1):
super(SeparableConv2d, self).__init__()
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
@property
def in_channels(self):
return self.conv_dw.in_channels
@property
def out_channels(self):
return self.conv_pw.out_channels
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
return x
| pytorch-image-models/timm/layers/separable_conv.py/0 | {
"file_path": "pytorch-image-models/timm/layers/separable_conv.py",
"repo_id": "pytorch-image-models",
"token_count": 1138
} | 197 |
import dataclasses
import logging
import os
from copy import deepcopy
from typing import Any, Callable, Dict, List, Optional, Tuple
from torch import nn as nn
from torch.hub import load_state_dict_from_url
from timm.models._features import FeatureListNet, FeatureDictNet, FeatureHookNet, FeatureGetterNet
from timm.models._features_fx import FeatureGraphNet
from timm.models._helpers import load_state_dict
from timm.models._hub import has_hf_hub, download_cached_file, check_cached_file, load_state_dict_from_hf,\
load_custom_from_hf
from timm.models._manipulate import adapt_input_conv
from timm.models._pretrained import PretrainedCfg
from timm.models._prune import adapt_model_from_file
from timm.models._registry import get_pretrained_cfg
_logger = logging.getLogger(__name__)
# Global variables for rarely used pretrained checkpoint download progress and hash check.
# Use set_pretrained_download_progress / set_pretrained_check_hash functions to toggle.
_DOWNLOAD_PROGRESS = False
_CHECK_HASH = False
_USE_OLD_CACHE = int(os.environ.get('TIMM_USE_OLD_CACHE', 0)) > 0
__all__ = ['set_pretrained_download_progress', 'set_pretrained_check_hash', 'load_custom_pretrained', 'load_pretrained',
'pretrained_cfg_for_features', 'resolve_pretrained_cfg', 'build_model_with_cfg']
def _resolve_pretrained_source(pretrained_cfg):
cfg_source = pretrained_cfg.get('source', '')
pretrained_url = pretrained_cfg.get('url', None)
pretrained_file = pretrained_cfg.get('file', None)
pretrained_sd = pretrained_cfg.get('state_dict', None)
hf_hub_id = pretrained_cfg.get('hf_hub_id', None)
# resolve where to load pretrained weights from
load_from = ''
pretrained_loc = ''
if cfg_source == 'hf-hub' and has_hf_hub(necessary=True):
# hf-hub specified as source via model identifier
load_from = 'hf-hub'
assert hf_hub_id
pretrained_loc = hf_hub_id
else:
# default source == timm or unspecified
if pretrained_sd:
# direct state_dict pass through is the highest priority
load_from = 'state_dict'
pretrained_loc = pretrained_sd
assert isinstance(pretrained_loc, dict)
elif pretrained_file:
# file load override is the second-highest priority if set
load_from = 'file'
pretrained_loc = pretrained_file
else:
old_cache_valid = False
if _USE_OLD_CACHE:
# prioritized old cached weights if exists and env var enabled
old_cache_valid = check_cached_file(pretrained_url) if pretrained_url else False
if not old_cache_valid and hf_hub_id and has_hf_hub(necessary=True):
# hf-hub available as alternate weight source in default_cfg
load_from = 'hf-hub'
pretrained_loc = hf_hub_id
elif pretrained_url:
load_from = 'url'
pretrained_loc = pretrained_url
if load_from == 'hf-hub' and pretrained_cfg.get('hf_hub_filename', None):
# if a filename override is set, return tuple for location w/ (hub_id, filename)
pretrained_loc = pretrained_loc, pretrained_cfg['hf_hub_filename']
return load_from, pretrained_loc
def set_pretrained_download_progress(enable=True):
""" Set download progress for pretrained weights on/off (globally). """
global _DOWNLOAD_PROGRESS
_DOWNLOAD_PROGRESS = enable
def set_pretrained_check_hash(enable=True):
""" Set hash checking for pretrained weights on/off (globally). """
global _CHECK_HASH
_CHECK_HASH = enable
def load_custom_pretrained(
model: nn.Module,
pretrained_cfg: Optional[Dict] = None,
load_fn: Optional[Callable] = None,
):
r"""Loads a custom (read non .pth) weight file
Downloads checkpoint file into cache-dir like torch.hub based loaders, but calls
a passed in custom load fun, or the `load_pretrained` model member fn.
If the object is already present in `model_dir`, it's deserialized and returned.
The default value of `model_dir` is ``<hub_dir>/checkpoints`` where
`hub_dir` is the directory returned by :func:`~torch.hub.get_dir`.
Args:
model: The instantiated model to load weights into
pretrained_cfg (dict): Default pretrained model cfg
load_fn: An external standalone fn that loads weights into provided model, otherwise a fn named
'laod_pretrained' on the model will be called if it exists
"""
pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
if not pretrained_cfg:
_logger.warning("Invalid pretrained config, cannot load weights.")
return
load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
if not load_from:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return
if load_from == 'hf-hub':
_logger.warning("Hugging Face hub not currently supported for custom load pretrained models.")
elif load_from == 'url':
pretrained_loc = download_cached_file(
pretrained_loc,
check_hash=_CHECK_HASH,
progress=_DOWNLOAD_PROGRESS,
)
if load_fn is not None:
load_fn(model, pretrained_loc)
elif hasattr(model, 'load_pretrained'):
model.load_pretrained(pretrained_loc)
else:
_logger.warning("Valid function to load pretrained weights is not available, using random initialization.")
def load_pretrained(
model: nn.Module,
pretrained_cfg: Optional[Dict] = None,
num_classes: int = 1000,
in_chans: int = 3,
filter_fn: Optional[Callable] = None,
strict: bool = True,
):
""" Load pretrained checkpoint
Args:
model (nn.Module) : PyTorch model module
pretrained_cfg (Optional[Dict]): configuration for pretrained weights / target dataset
num_classes (int): num_classes for target model
in_chans (int): in_chans for target model
filter_fn (Optional[Callable]): state_dict filter fn for load (takes state_dict, model as args)
strict (bool): strict load of checkpoint
"""
pretrained_cfg = pretrained_cfg or getattr(model, 'pretrained_cfg', None)
if not pretrained_cfg:
raise RuntimeError("Invalid pretrained config, cannot load weights. Use `pretrained=False` for random init.")
load_from, pretrained_loc = _resolve_pretrained_source(pretrained_cfg)
if load_from == 'state_dict':
_logger.info(f'Loading pretrained weights from state dict')
state_dict = pretrained_loc # pretrained_loc is the actual state dict for this override
elif load_from == 'file':
_logger.info(f'Loading pretrained weights from file ({pretrained_loc})')
if pretrained_cfg.get('custom_load', False):
model.load_pretrained(pretrained_loc)
return
else:
state_dict = load_state_dict(pretrained_loc)
elif load_from == 'url':
_logger.info(f'Loading pretrained weights from url ({pretrained_loc})')
if pretrained_cfg.get('custom_load', False):
pretrained_loc = download_cached_file(
pretrained_loc,
progress=_DOWNLOAD_PROGRESS,
check_hash=_CHECK_HASH,
)
model.load_pretrained(pretrained_loc)
return
else:
try:
state_dict = load_state_dict_from_url(
pretrained_loc,
map_location='cpu',
progress=_DOWNLOAD_PROGRESS,
check_hash=_CHECK_HASH,
weights_only=True,
)
except TypeError:
state_dict = load_state_dict_from_url(
pretrained_loc,
map_location='cpu',
progress=_DOWNLOAD_PROGRESS,
check_hash=_CHECK_HASH,
)
elif load_from == 'hf-hub':
_logger.info(f'Loading pretrained weights from Hugging Face hub ({pretrained_loc})')
if isinstance(pretrained_loc, (list, tuple)):
custom_load = pretrained_cfg.get('custom_load', False)
if isinstance(custom_load, str) and custom_load == 'hf':
load_custom_from_hf(*pretrained_loc, model)
return
else:
state_dict = load_state_dict_from_hf(*pretrained_loc)
else:
state_dict = load_state_dict_from_hf(pretrained_loc, weights_only=True)
else:
model_name = pretrained_cfg.get('architecture', 'this model')
raise RuntimeError(f"No pretrained weights exist for {model_name}. Use `pretrained=False` for random init.")
if filter_fn is not None:
try:
state_dict = filter_fn(state_dict, model)
except TypeError as e:
# for backwards compat with filter fn that take one arg
state_dict = filter_fn(state_dict)
input_convs = pretrained_cfg.get('first_conv', None)
if input_convs is not None and in_chans != 3:
if isinstance(input_convs, str):
input_convs = (input_convs,)
for input_conv_name in input_convs:
weight_name = input_conv_name + '.weight'
try:
state_dict[weight_name] = adapt_input_conv(in_chans, state_dict[weight_name])
_logger.info(
f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
except NotImplementedError as e:
del state_dict[weight_name]
strict = False
_logger.warning(
f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
classifiers = pretrained_cfg.get('classifier', None)
label_offset = pretrained_cfg.get('label_offset', 0)
if classifiers is not None:
if isinstance(classifiers, str):
classifiers = (classifiers,)
if num_classes != pretrained_cfg['num_classes']:
for classifier_name in classifiers:
# completely discard fully connected if model num_classes doesn't match pretrained weights
state_dict.pop(classifier_name + '.weight', None)
state_dict.pop(classifier_name + '.bias', None)
strict = False
elif label_offset > 0:
for classifier_name in classifiers:
# special case for pretrained weights with an extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
load_result = model.load_state_dict(state_dict, strict=strict)
if load_result.missing_keys:
_logger.info(
f'Missing keys ({", ".join(load_result.missing_keys)}) discovered while loading pretrained weights.'
f' This is expected if model is being adapted.')
if load_result.unexpected_keys:
_logger.warning(
f'Unexpected keys ({", ".join(load_result.unexpected_keys)}) found while loading pretrained weights.'
f' This may be expected if model is being adapted.')
def pretrained_cfg_for_features(pretrained_cfg):
pretrained_cfg = deepcopy(pretrained_cfg)
# remove default pretrained cfg fields that don't have much relevance for feature backbone
to_remove = ('num_classes', 'classifier', 'global_pool') # add default final pool size?
for tr in to_remove:
pretrained_cfg.pop(tr, None)
return pretrained_cfg
def _filter_kwargs(kwargs, names):
if not kwargs or not names:
return
for n in names:
kwargs.pop(n, None)
def _update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter):
""" Update the default_cfg and kwargs before passing to model
Args:
pretrained_cfg: input pretrained cfg (updated in-place)
kwargs: keyword args passed to model build fn (updated in-place)
kwargs_filter: keyword arg keys that must be removed before model __init__
"""
# Set model __init__ args that can be determined by default_cfg (if not already passed as kwargs)
default_kwarg_names = ('num_classes', 'global_pool', 'in_chans')
if pretrained_cfg.get('fixed_input_size', False):
# if fixed_input_size exists and is True, model takes an img_size arg that fixes its input size
default_kwarg_names += ('img_size',)
for n in default_kwarg_names:
# for legacy reasons, model __init__args uses img_size + in_chans as separate args while
# pretrained_cfg has one input_size=(C, H ,W) entry
if n == 'img_size':
input_size = pretrained_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[-2:])
elif n == 'in_chans':
input_size = pretrained_cfg.get('input_size', None)
if input_size is not None:
assert len(input_size) == 3
kwargs.setdefault(n, input_size[0])
elif n == 'num_classes':
default_val = pretrained_cfg.get(n, None)
# if default is < 0, don't pass through to model
if default_val is not None and default_val >= 0:
kwargs.setdefault(n, pretrained_cfg[n])
else:
default_val = pretrained_cfg.get(n, None)
if default_val is not None:
kwargs.setdefault(n, pretrained_cfg[n])
# Filter keyword args for task specific model variants (some 'features only' models, etc.)
_filter_kwargs(kwargs, names=kwargs_filter)
def resolve_pretrained_cfg(
variant: str,
pretrained_cfg=None,
pretrained_cfg_overlay=None,
) -> PretrainedCfg:
model_with_tag = variant
pretrained_tag = None
if pretrained_cfg:
if isinstance(pretrained_cfg, dict):
# pretrained_cfg dict passed as arg, validate by converting to PretrainedCfg
pretrained_cfg = PretrainedCfg(**pretrained_cfg)
elif isinstance(pretrained_cfg, str):
pretrained_tag = pretrained_cfg
pretrained_cfg = None
# fallback to looking up pretrained cfg in model registry by variant identifier
if not pretrained_cfg:
if pretrained_tag:
model_with_tag = '.'.join([variant, pretrained_tag])
pretrained_cfg = get_pretrained_cfg(model_with_tag)
if not pretrained_cfg:
_logger.warning(
f"No pretrained configuration specified for {model_with_tag} model. Using a default."
f" Please add a config to the model pretrained_cfg registry or pass explicitly.")
pretrained_cfg = PretrainedCfg() # instance with defaults
pretrained_cfg_overlay = pretrained_cfg_overlay or {}
if not pretrained_cfg.architecture:
pretrained_cfg_overlay.setdefault('architecture', variant)
pretrained_cfg = dataclasses.replace(pretrained_cfg, **pretrained_cfg_overlay)
return pretrained_cfg
def build_model_with_cfg(
model_cls: Callable,
variant: str,
pretrained: bool,
pretrained_cfg: Optional[Dict] = None,
pretrained_cfg_overlay: Optional[Dict] = None,
model_cfg: Optional[Any] = None,
feature_cfg: Optional[Dict] = None,
pretrained_strict: bool = True,
pretrained_filter_fn: Optional[Callable] = None,
kwargs_filter: Optional[Tuple[str]] = None,
**kwargs,
):
""" Build model with specified default_cfg and optional model_cfg
This helper fn aids in the construction of a model including:
* handling default_cfg and associated pretrained weight loading
* passing through optional model_cfg for models with config based arch spec
* features_only model adaptation
* pruning config / model adaptation
Args:
model_cls: model class
variant: model variant name
pretrained: load pretrained weights
pretrained_cfg: model's pretrained weight/task config
model_cfg: model's architecture config
feature_cfg: feature extraction adapter config
pretrained_strict: load pretrained weights strictly
pretrained_filter_fn: filter callable for pretrained weights
kwargs_filter: kwargs to filter before passing to model
**kwargs: model args passed through to model __init__
"""
pruned = kwargs.pop('pruned', False)
features = False
feature_cfg = feature_cfg or {}
# resolve and update model pretrained config and model kwargs
pretrained_cfg = resolve_pretrained_cfg(
variant,
pretrained_cfg=pretrained_cfg,
pretrained_cfg_overlay=pretrained_cfg_overlay
)
# FIXME converting back to dict, PretrainedCfg use should be propagated further, but not into model
pretrained_cfg = pretrained_cfg.to_dict()
_update_default_model_kwargs(pretrained_cfg, kwargs, kwargs_filter)
# Setup for feature extraction wrapper done at end of this fn
if kwargs.pop('features_only', False):
features = True
feature_cfg.setdefault('out_indices', (0, 1, 2, 3, 4))
if 'out_indices' in kwargs:
feature_cfg['out_indices'] = kwargs.pop('out_indices')
if 'feature_cls' in kwargs:
feature_cfg['feature_cls'] = kwargs.pop('feature_cls')
# Instantiate the model
if model_cfg is None:
model = model_cls(**kwargs)
else:
model = model_cls(cfg=model_cfg, **kwargs)
model.pretrained_cfg = pretrained_cfg
model.default_cfg = model.pretrained_cfg # alias for backwards compat
if pruned:
model = adapt_model_from_file(model, variant)
# For classification models, check class attr, then kwargs, then default to 1k, otherwise 0 for feats
num_classes_pretrained = 0 if features else getattr(model, 'num_classes', kwargs.get('num_classes', 1000))
if pretrained:
load_pretrained(
model,
pretrained_cfg=pretrained_cfg,
num_classes=num_classes_pretrained,
in_chans=kwargs.get('in_chans', 3),
filter_fn=pretrained_filter_fn,
strict=pretrained_strict,
)
# Wrap the model in a feature extraction module if enabled
if features:
use_getter = False
if 'feature_cls' in feature_cfg:
feature_cls = feature_cfg.pop('feature_cls')
if isinstance(feature_cls, str):
feature_cls = feature_cls.lower()
# flatten_sequential only valid for some feature extractors
if feature_cls not in ('dict', 'list', 'hook'):
feature_cfg.pop('flatten_sequential', None)
if 'hook' in feature_cls:
feature_cls = FeatureHookNet
elif feature_cls == 'list':
feature_cls = FeatureListNet
elif feature_cls == 'dict':
feature_cls = FeatureDictNet
elif feature_cls == 'fx':
feature_cls = FeatureGraphNet
elif feature_cls == 'getter':
use_getter = True
feature_cls = FeatureGetterNet
else:
assert False, f'Unknown feature class {feature_cls}'
else:
feature_cls = FeatureListNet
output_fmt = getattr(model, 'output_fmt', None)
if output_fmt is not None and not use_getter: # don't set default for intermediate feat getter
feature_cfg.setdefault('output_fmt', output_fmt)
model = feature_cls(model, **feature_cfg)
model.pretrained_cfg = pretrained_cfg_for_features(pretrained_cfg) # add back pretrained cfg
model.default_cfg = model.pretrained_cfg # alias for rename backwards compat (default_cfg -> pretrained_cfg)
return model
| pytorch-image-models/timm/models/_builder.py/0 | {
"file_path": "pytorch-image-models/timm/models/_builder.py",
"repo_id": "pytorch-image-models",
"token_count": 8424
} | 198 |
""" Model Registry
Hacked together by / Copyright 2020 Ross Wightman
"""
import fnmatch
import re
import sys
import warnings
from collections import defaultdict, deque
from copy import deepcopy
from dataclasses import replace
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Sequence, Union, Tuple
from ._pretrained import PretrainedCfg, DefaultCfg
__all__ = [
'split_model_name_tag', 'get_arch_name', 'register_model', 'generate_default_cfgs',
'list_models', 'list_pretrained', 'is_model', 'model_entrypoint', 'list_modules', 'is_model_in_modules',
'get_pretrained_cfg_value', 'is_model_pretrained'
]
_module_to_models: Dict[str, Set[str]] = defaultdict(set) # dict of sets to check membership of model in module
_model_to_module: Dict[str, str] = {} # mapping of model names to module names
_model_entrypoints: Dict[str, Callable[..., Any]] = {} # mapping of model names to architecture entrypoint fns
_model_has_pretrained: Set[str] = set() # set of model names that have pretrained weight url present
_model_default_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch -> default cfg objects
_model_pretrained_cfgs: Dict[str, PretrainedCfg] = {} # central repo for model arch.tag -> pretrained cfgs
_model_with_tags: Dict[str, List[str]] = defaultdict(list) # shortcut to map each model arch to all model + tag names
_module_to_deprecated_models: Dict[str, Dict[str, Optional[str]]] = defaultdict(dict)
_deprecated_models: Dict[str, Optional[str]] = {}
def split_model_name_tag(model_name: str, no_tag: str = '') -> Tuple[str, str]:
model_name, *tag_list = model_name.split('.', 1)
tag = tag_list[0] if tag_list else no_tag
return model_name, tag
def get_arch_name(model_name: str) -> str:
return split_model_name_tag(model_name)[0]
def generate_default_cfgs(cfgs: Dict[str, Union[Dict[str, Any], PretrainedCfg]]):
out = defaultdict(DefaultCfg)
default_set = set() # no tag and tags ending with * are prioritized as default
for k, v in cfgs.items():
if isinstance(v, dict):
v = PretrainedCfg(**v)
has_weights = v.has_weights
model, tag = split_model_name_tag(k)
is_default_set = model in default_set
priority = (has_weights and not tag) or (tag.endswith('*') and not is_default_set)
tag = tag.strip('*')
default_cfg = out[model]
if priority:
default_cfg.tags.appendleft(tag)
default_set.add(model)
elif has_weights and not default_cfg.is_pretrained:
default_cfg.tags.appendleft(tag)
else:
default_cfg.tags.append(tag)
if has_weights:
default_cfg.is_pretrained = True
default_cfg.cfgs[tag] = v
return out
def register_model(fn: Callable[..., Any]) -> Callable[..., Any]:
# lookup containing module
mod = sys.modules[fn.__module__]
module_name_split = fn.__module__.split('.')
module_name = module_name_split[-1] if len(module_name_split) else ''
# add model to __all__ in module
model_name = fn.__name__
if hasattr(mod, '__all__'):
mod.__all__.append(model_name)
else:
mod.__all__ = [model_name] # type: ignore
# add entries to registry dict/sets
if model_name in _model_entrypoints:
warnings.warn(
f'Overwriting {model_name} in registry with {fn.__module__}.{model_name}. This is because the name being '
'registered conflicts with an existing name. Please check if this is not expected.',
stacklevel=2,
)
_model_entrypoints[model_name] = fn
_model_to_module[model_name] = module_name
_module_to_models[module_name].add(model_name)
if hasattr(mod, 'default_cfgs') and model_name in mod.default_cfgs:
# this will catch all models that have entrypoint matching cfg key, but miss any aliasing
# entrypoints or non-matching combos
default_cfg = mod.default_cfgs[model_name]
if not isinstance(default_cfg, DefaultCfg):
# new style default cfg dataclass w/ multiple entries per model-arch
assert isinstance(default_cfg, dict)
# old style cfg dict per model-arch
pretrained_cfg = PretrainedCfg(**default_cfg)
default_cfg = DefaultCfg(tags=deque(['']), cfgs={'': pretrained_cfg})
for tag_idx, tag in enumerate(default_cfg.tags):
is_default = tag_idx == 0
pretrained_cfg = default_cfg.cfgs[tag]
model_name_tag = '.'.join([model_name, tag]) if tag else model_name
replace_items = dict(architecture=model_name, tag=tag if tag else None)
if pretrained_cfg.hf_hub_id and pretrained_cfg.hf_hub_id == 'timm/':
# auto-complete hub name w/ architecture.tag
replace_items['hf_hub_id'] = pretrained_cfg.hf_hub_id + model_name_tag
pretrained_cfg = replace(pretrained_cfg, **replace_items)
if is_default:
_model_pretrained_cfgs[model_name] = pretrained_cfg
if pretrained_cfg.has_weights:
# add tagless entry if it's default and has weights
_model_has_pretrained.add(model_name)
if tag:
_model_pretrained_cfgs[model_name_tag] = pretrained_cfg
if pretrained_cfg.has_weights:
# add model w/ tag if tag is valid
_model_has_pretrained.add(model_name_tag)
_model_with_tags[model_name].append(model_name_tag)
else:
_model_with_tags[model_name].append(model_name) # has empty tag (to slowly remove these instances)
_model_default_cfgs[model_name] = default_cfg
return fn
def _deprecated_model_shim(deprecated_name: str, current_fn: Callable = None, current_tag: str = ''):
def _fn(pretrained=False, **kwargs):
assert current_fn is not None, f'Model {deprecated_name} has been removed with no replacement.'
current_name = '.'.join([current_fn.__name__, current_tag]) if current_tag else current_fn.__name__
warnings.warn(f'Mapping deprecated model name {deprecated_name} to current {current_name}.', stacklevel=2)
pretrained_cfg = kwargs.pop('pretrained_cfg', None)
return current_fn(pretrained=pretrained, pretrained_cfg=pretrained_cfg or current_tag, **kwargs)
return _fn
def register_model_deprecations(module_name: str, deprecation_map: Dict[str, Optional[str]]):
mod = sys.modules[module_name]
module_name_split = module_name.split('.')
module_name = module_name_split[-1] if len(module_name_split) else ''
for deprecated, current in deprecation_map.items():
if hasattr(mod, '__all__'):
mod.__all__.append(deprecated)
current_fn = None
current_tag = ''
if current:
current_name, current_tag = split_model_name_tag(current)
current_fn = getattr(mod, current_name)
deprecated_entrypoint_fn = _deprecated_model_shim(deprecated, current_fn, current_tag)
setattr(mod, deprecated, deprecated_entrypoint_fn)
_model_entrypoints[deprecated] = deprecated_entrypoint_fn
_model_to_module[deprecated] = module_name
_module_to_models[module_name].add(deprecated)
_deprecated_models[deprecated] = current
_module_to_deprecated_models[module_name][deprecated] = current
def _natural_key(string_: str) -> List[Union[int, str]]:
"""See https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _expand_filter(filter: str):
""" expand a 'base_filter' to 'base_filter.*' if no tag portion"""
filter_base, filter_tag = split_model_name_tag(filter)
if not filter_tag:
return ['.'.join([filter_base, '*']), filter]
else:
return [filter]
def list_models(
filter: Union[str, List[str]] = '',
module: Union[str, List[str]] = '',
pretrained: bool = False,
exclude_filters: Union[str, List[str]] = '',
name_matches_cfg: bool = False,
include_tags: Optional[bool] = None,
) -> List[str]:
""" Return list of available model names, sorted alphabetically
Args:
filter - Wildcard filter string that works with fnmatch
module - Limit model selection to a specific submodule (ie 'vision_transformer')
pretrained - Include only models with valid pretrained weights if True
exclude_filters - Wildcard filters to exclude models after including them with filter
name_matches_cfg - Include only models w/ model_name matching default_cfg name (excludes some aliases)
include_tags - Include pretrained tags in model names (model.tag). If None, defaults
set to True when pretrained=True else False (default: None)
Returns:
models - The sorted list of models
Example:
model_list('gluon_resnet*') -- returns all models starting with 'gluon_resnet'
model_list('*resnext*, 'resnet') -- returns all models with 'resnext' in 'resnet' module
"""
if filter:
include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
else:
include_filters = []
if include_tags is None:
# FIXME should this be default behaviour? or default to include_tags=True?
include_tags = pretrained
if not module:
all_models: Set[str] = set(_model_entrypoints.keys())
else:
if isinstance(module, str):
all_models: Set[str] = _module_to_models[module]
else:
assert isinstance(module, Sequence)
all_models: Set[str] = set()
for m in module:
all_models.update(_module_to_models[m])
all_models = all_models - _deprecated_models.keys() # remove deprecated models from listings
if include_tags:
# expand model names to include names w/ pretrained tags
models_with_tags: Set[str] = set()
for m in all_models:
models_with_tags.update(_model_with_tags[m])
all_models = models_with_tags
# expand include and exclude filters to include a '.*' for proper match if no tags in filter
include_filters = [ef for f in include_filters for ef in _expand_filter(f)]
exclude_filters = [ef for f in exclude_filters for ef in _expand_filter(f)]
if include_filters:
models: Set[str] = set()
for f in include_filters:
include_models = fnmatch.filter(all_models, f) # include these models
if len(include_models):
models = models.union(include_models)
else:
models = all_models
if exclude_filters:
if not isinstance(exclude_filters, (tuple, list)):
exclude_filters = [exclude_filters]
for xf in exclude_filters:
exclude_models = fnmatch.filter(models, xf) # exclude these models
if len(exclude_models):
models = models.difference(exclude_models)
if pretrained:
models = _model_has_pretrained.intersection(models)
if name_matches_cfg:
models = set(_model_pretrained_cfgs).intersection(models)
return sorted(models, key=_natural_key)
def list_pretrained(
filter: Union[str, List[str]] = '',
exclude_filters: str = '',
) -> List[str]:
return list_models(
filter=filter,
pretrained=True,
exclude_filters=exclude_filters,
include_tags=True,
)
def get_deprecated_models(module: str = '') -> Dict[str, str]:
all_deprecated = _module_to_deprecated_models[module] if module else _deprecated_models
return deepcopy(all_deprecated)
def is_model(model_name: str) -> bool:
""" Check if a model name exists
"""
arch_name = get_arch_name(model_name)
return arch_name in _model_entrypoints
def model_entrypoint(model_name: str, module_filter: Optional[str] = None) -> Callable[..., Any]:
"""Fetch a model entrypoint for specified model name
"""
arch_name = get_arch_name(model_name)
if module_filter and arch_name not in _module_to_models.get(module_filter, {}):
raise RuntimeError(f'Model ({model_name} not found in module {module_filter}.')
return _model_entrypoints[arch_name]
def list_modules() -> List[str]:
""" Return list of module names that contain models / model entrypoints
"""
modules = _module_to_models.keys()
return sorted(modules)
def is_model_in_modules(
model_name: str, module_names: Union[Tuple[str, ...], List[str], Set[str]]
) -> bool:
"""Check if a model exists within a subset of modules
Args:
model_name - name of model to check
module_names - names of modules to search in
"""
arch_name = get_arch_name(model_name)
assert isinstance(module_names, (tuple, list, set))
return any(arch_name in _module_to_models[n] for n in module_names)
def is_model_pretrained(model_name: str) -> bool:
return model_name in _model_has_pretrained
def get_pretrained_cfg(model_name: str, allow_unregistered: bool = True) -> Optional[PretrainedCfg]:
if model_name in _model_pretrained_cfgs:
return deepcopy(_model_pretrained_cfgs[model_name])
arch_name, tag = split_model_name_tag(model_name)
if arch_name in _model_default_cfgs:
# if model arch exists, but the tag is wrong, error out
raise RuntimeError(f'Invalid pretrained tag ({tag}) for {arch_name}.')
if allow_unregistered:
# if model arch doesn't exist, it has no pretrained_cfg registered, allow a default to be created
return None
raise RuntimeError(f'Model architecture ({arch_name}) has no pretrained cfg registered.')
def get_pretrained_cfg_value(model_name: str, cfg_key: str) -> Optional[Any]:
""" Get a specific model default_cfg value by key. None if key doesn't exist.
"""
cfg = get_pretrained_cfg(model_name, allow_unregistered=False)
return getattr(cfg, cfg_key, None)
| pytorch-image-models/timm/models/_registry.py/0 | {
"file_path": "pytorch-image-models/timm/models/_registry.py",
"repo_id": "pytorch-image-models",
"token_count": 5587
} | 199 |
""" EdgeNeXt
Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications`
- https://arxiv.org/abs/2206.10589
Original code and weights from https://github.com/mmaaz60/EdgeNeXt
Modifications and additions for timm by / Copyright 2022, Ross Wightman
"""
import math
from functools import partial
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, create_conv2d, \
NormMlpClassifierHead, ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import named_apply, checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this
@register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method
class PositionalEncodingFourier(nn.Module):
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
def forward(self, shape: Tuple[int, int, int]):
device = self.token_projection.weight.device
dtype = self.token_projection.weight.dtype
inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool)
y_embed = inv_mask.cumsum(1, dtype=torch.float32)
x_embed = inv_mask.cumsum(2, dtype=torch.float32)
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.int64, device=device).to(torch.float32)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(),
pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(),
pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos.to(dtype))
return pos
class ConvBlock(nn.Module):
def __init__(
self,
dim,
dim_out=None,
kernel_size=7,
stride=1,
conv_bias=True,
expand_ratio=4,
ls_init_value=1e-6,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU, drop_path=0.,
):
super().__init__()
dim_out = dim_out or dim
self.shortcut_after_dw = stride > 1 or dim != dim_out
self.conv_dw = create_conv2d(
dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias)
self.norm = norm_layer(dim_out)
self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
if self.shortcut_after_dw:
shortcut = x
x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = shortcut + self.drop_path(x)
return x
class CrossCovarianceAttn(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1)
q, k, v = qkv.unbind(0)
# NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map
attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v)
x = x.permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@torch.jit.ignore
def no_weight_decay(self):
return {'temperature'}
class SplitTransposeBlock(nn.Module):
def __init__(
self,
dim,
num_scales=1,
num_heads=8,
expand_ratio=4,
use_pos_emb=True,
conv_bias=True,
qkv_bias=True,
ls_init_value=1e-6,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
drop_path=0.,
attn_drop=0.,
proj_drop=0.
):
super().__init__()
width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales)))
self.width = width
self.num_scales = max(1, num_scales - 1)
convs = []
for i in range(self.num_scales):
convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias))
self.convs = nn.ModuleList(convs)
self.pos_embd = None
if use_pos_emb:
self.pos_embd = PositionalEncodingFourier(dim=dim)
self.norm_xca = norm_layer(dim)
self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.xca = CrossCovarianceAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
self.norm = norm_layer(dim, eps=1e-6)
self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
# scales code re-written for torchscript as per my res2net fixes -rw
# NOTE torch.split(x, self.width, 1) causing issues with ONNX export
spx = x.chunk(len(self.convs) + 1, dim=1)
spo = []
sp = spx[0]
for i, conv in enumerate(self.convs):
if i > 0:
sp = sp + spx[i]
sp = conv(sp)
spo.append(sp)
spo.append(spx[-1])
x = torch.cat(spo, 1)
# XCA
B, C, H, W = x.shape
x = x.reshape(B, C, H * W).permute(0, 2, 1)
if self.pos_embd is not None:
pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x)))
x = x.reshape(B, H, W, C)
# Inverted Bottleneck
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
x = shortcut + self.drop_path(x)
return x
class EdgeNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=2,
depth=2,
num_global_blocks=1,
num_heads=4,
scales=2,
kernel_size=7,
expand_ratio=4,
use_pos_emb=False,
downsample_block=False,
conv_bias=True,
ls_init_value=1.0,
drop_path_rates=None,
norm_layer=LayerNorm2d,
norm_layer_cl=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU
):
super().__init__()
self.grad_checkpointing = False
if downsample_block or stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias)
)
in_chs = out_chs
stage_blocks = []
for i in range(depth):
if i < depth - num_global_blocks:
stage_blocks.append(
ConvBlock(
dim=in_chs,
dim_out=out_chs,
stride=stride if downsample_block and i == 0 else 1,
conv_bias=conv_bias,
kernel_size=kernel_size,
expand_ratio=expand_ratio,
ls_init_value=ls_init_value,
drop_path=drop_path_rates[i],
norm_layer=norm_layer_cl,
act_layer=act_layer,
)
)
else:
stage_blocks.append(
SplitTransposeBlock(
dim=in_chs,
num_scales=scales,
num_heads=num_heads,
expand_ratio=expand_ratio,
use_pos_emb=use_pos_emb,
conv_bias=conv_bias,
ls_init_value=ls_init_value,
drop_path=drop_path_rates[i],
norm_layer=norm_layer_cl,
act_layer=act_layer,
)
)
in_chs = out_chs
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EdgeNeXt(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
dims=(24, 48, 88, 168),
depths=(3, 3, 9, 3),
global_block_counts=(0, 1, 1, 1),
kernel_sizes=(3, 5, 7, 9),
heads=(8, 8, 8, 8),
d2_scales=(2, 2, 3, 4),
use_pos_emb=(False, True, False, False),
ls_init_value=1e-6,
head_init_scale=1.,
expand_ratio=4,
downsample_block=False,
conv_bias=True,
stem_type='patch',
head_norm_first=False,
act_layer=nn.GELU,
drop_path_rate=0.,
drop_rate=0.,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.drop_rate = drop_rate
norm_layer = partial(LayerNorm2d, eps=1e-6)
norm_layer_cl = partial(nn.LayerNorm, eps=1e-6)
self.feature_info = []
assert stem_type in ('patch', 'overlap')
if stem_type == 'patch':
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias),
norm_layer(dims[0]),
)
else:
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias),
norm_layer(dims[0]),
)
curr_stride = 4
stages = []
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
in_chs = dims[0]
for i in range(4):
stride = 2 if curr_stride == 2 or i > 0 else 1
# FIXME support dilation / output_stride
curr_stride *= stride
stages.append(EdgeNeXtStage(
in_chs=in_chs,
out_chs=dims[i],
stride=stride,
depth=depths[i],
num_global_blocks=global_block_counts[i],
num_heads=heads[i],
drop_path_rates=dp_rates[i],
scales=d2_scales[i],
expand_ratio=expand_ratio,
kernel_size=kernel_sizes[i],
use_pos_emb=use_pos_emb[i],
ls_init_value=ls_init_value,
downsample_block=downsample_block,
conv_bias=conv_bias,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
act_layer=act_layer,
))
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
in_chs = dims[i]
self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = self.head_hidden_size = dims[-1]
if head_norm_first:
self.norm_pre = norm_layer(self.num_features)
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
else:
self.norm_pre = nn.Identity()
self.head = NormMlpClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
norm_layer=norm_layer,
)
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm_pre', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm_pre(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_tf_(module.weight, std=.02)
nn.init.zeros_(module.bias)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict:
return state_dict # non-FB checkpoint
# models were released as train checkpoints... :/
if 'model_ema' in state_dict:
state_dict = state_dict['model_ema']
elif 'model' in state_dict:
state_dict = state_dict['model']
elif 'state_dict' in state_dict:
state_dict = state_dict['state_dict']
out_dict = {}
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
k = k.replace('head.', 'head.fc.')
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
def _create_edgenext(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
EdgeNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'edgenext_xx_small.in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'edgenext_x_small.in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=1.0),
'edgenext_small.usi_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_base.usi_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_base.in21k_ft_in1k': _cfg( # USI weights
hf_hub_id='timm/',
crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
'edgenext_small_rw.sw_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 320, 320), test_crop_pct=1.0,
),
})
@register_model
def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 1.33M & 260.58M @ 256 resolution
# 71.23% Top-1 accuracy
# No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=51.66 versus 47.67 for MobileViT_XXS
# For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS
model_args = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4))
return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 2.34M & 538.0M @ 256 resolution
# 75.00% Top-1 accuracy
# No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=31.61 versus 28.49 for MobileViT_XS
# For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS
model_args = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4))
return _create_edgenext('edgenext_x_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt:
# 5.59M & 1260.59M @ 256 resolution
# 79.43% Top-1 accuracy
# AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=20.47 versus 18.86 for MobileViT_S
# For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 for MobileViT_S
model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304))
return _create_edgenext('edgenext_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt:
# 18.51M & 3840.93M @ 256 resolution
# 82.5% (normal) 83.7% (USI) Top-1 accuracy
# AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler
# Jetson FPS=xx.xx versus xx.xx for MobileViT_S
# For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx
model_args = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584])
return _create_edgenext('edgenext_base', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt:
model_args = dict(
depths=(3, 3, 9, 3), dims=(48, 96, 192, 384),
downsample_block=True, conv_bias=False, stem_type='overlap')
return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/edgenext.py/0 | {
"file_path": "pytorch-image-models/timm/models/edgenext.py",
"repo_id": "pytorch-image-models",
"token_count": 11055
} | 200 |
""" PP-HGNet (V1 & V2)
Reference:
https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ImageNet1k/PP-HGNetV2.md
The Paddle Implement of PP-HGNet (https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/docs/en/models/PP-HGNet_en.md)
PP-HGNet: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet.py
PP-HGNetv2: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet_v2.py
"""
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SelectAdaptivePool2d, DropPath, create_conv2d
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
from ._manipulate import checkpoint_seq
__all__ = ['HighPerfGpuNet']
class LearnableAffineBlock(nn.Module):
def __init__(
self,
scale_value=1.0,
bias_value=0.0
):
super().__init__()
self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True)
self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True)
def forward(self, x):
return self.scale * x + self.bias
class ConvBNAct(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size,
stride=1,
groups=1,
padding='',
use_act=True,
use_lab=False
):
super().__init__()
self.use_act = use_act
self.use_lab = use_lab
self.conv = create_conv2d(
in_chs,
out_chs,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
)
self.bn = nn.BatchNorm2d(out_chs)
if self.use_act:
self.act = nn.ReLU()
else:
self.act = nn.Identity()
if self.use_act and self.use_lab:
self.lab = LearnableAffineBlock()
else:
self.lab = nn.Identity()
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.act(x)
x = self.lab(x)
return x
class LightConvBNAct(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size,
groups=1,
use_lab=False
):
super().__init__()
self.conv1 = ConvBNAct(
in_chs,
out_chs,
kernel_size=1,
use_act=False,
use_lab=use_lab,
)
self.conv2 = ConvBNAct(
out_chs,
out_chs,
kernel_size=kernel_size,
groups=out_chs,
use_act=True,
use_lab=use_lab,
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class EseModule(nn.Module):
def __init__(self, chs):
super().__init__()
self.conv = nn.Conv2d(
chs,
chs,
kernel_size=1,
stride=1,
padding=0,
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
identity = x
x = x.mean((2, 3), keepdim=True)
x = self.conv(x)
x = self.sigmoid(x)
return torch.mul(identity, x)
class StemV1(nn.Module):
# for PP-HGNet
def __init__(self, stem_chs):
super().__init__()
self.stem = nn.Sequential(*[
ConvBNAct(
stem_chs[i],
stem_chs[i + 1],
kernel_size=3,
stride=2 if i == 0 else 1) for i in range(
len(stem_chs) - 1)
])
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.stem(x)
x = self.pool(x)
return x
class StemV2(nn.Module):
# for PP-HGNetv2
def __init__(self, in_chs, mid_chs, out_chs, use_lab=False):
super().__init__()
self.stem1 = ConvBNAct(
in_chs,
mid_chs,
kernel_size=3,
stride=2,
use_lab=use_lab,
)
self.stem2a = ConvBNAct(
mid_chs,
mid_chs // 2,
kernel_size=2,
stride=1,
use_lab=use_lab,
)
self.stem2b = ConvBNAct(
mid_chs // 2,
mid_chs,
kernel_size=2,
stride=1,
use_lab=use_lab,
)
self.stem3 = ConvBNAct(
mid_chs * 2,
mid_chs,
kernel_size=3,
stride=2,
use_lab=use_lab,
)
self.stem4 = ConvBNAct(
mid_chs,
out_chs,
kernel_size=1,
stride=1,
use_lab=use_lab,
)
self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True)
def forward(self, x):
x = self.stem1(x)
x = F.pad(x, (0, 1, 0, 1))
x2 = self.stem2a(x)
x2 = F.pad(x2, (0, 1, 0, 1))
x2 = self.stem2b(x2)
x1 = self.pool(x)
x = torch.cat([x1, x2], dim=1)
x = self.stem3(x)
x = self.stem4(x)
return x
class HighPerfGpuBlock(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
layer_num,
kernel_size=3,
residual=False,
light_block=False,
use_lab=False,
agg='ese',
drop_path=0.,
):
super().__init__()
self.residual = residual
self.layers = nn.ModuleList()
for i in range(layer_num):
if light_block:
self.layers.append(
LightConvBNAct(
in_chs if i == 0 else mid_chs,
mid_chs,
kernel_size=kernel_size,
use_lab=use_lab,
)
)
else:
self.layers.append(
ConvBNAct(
in_chs if i == 0 else mid_chs,
mid_chs,
kernel_size=kernel_size,
stride=1,
use_lab=use_lab,
)
)
# feature aggregation
total_chs = in_chs + layer_num * mid_chs
if agg == 'se':
aggregation_squeeze_conv = ConvBNAct(
total_chs,
out_chs // 2,
kernel_size=1,
stride=1,
use_lab=use_lab,
)
aggregation_excitation_conv = ConvBNAct(
out_chs // 2,
out_chs,
kernel_size=1,
stride=1,
use_lab=use_lab,
)
self.aggregation = nn.Sequential(
aggregation_squeeze_conv,
aggregation_excitation_conv,
)
else:
aggregation_conv = ConvBNAct(
total_chs,
out_chs,
kernel_size=1,
stride=1,
use_lab=use_lab,
)
att = EseModule(out_chs)
self.aggregation = nn.Sequential(
aggregation_conv,
att,
)
self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
def forward(self, x):
identity = x
output = [x]
for layer in self.layers:
x = layer(x)
output.append(x)
x = torch.cat(output, dim=1)
x = self.aggregation(x)
if self.residual:
x = self.drop_path(x) + identity
return x
class HighPerfGpuStage(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
block_num,
layer_num,
downsample=True,
stride=2,
light_block=False,
kernel_size=3,
use_lab=False,
agg='ese',
drop_path=0.,
):
super().__init__()
self.downsample = downsample
if downsample:
self.downsample = ConvBNAct(
in_chs,
in_chs,
kernel_size=3,
stride=stride,
groups=in_chs,
use_act=False,
use_lab=use_lab,
)
else:
self.downsample = nn.Identity()
blocks_list = []
for i in range(block_num):
blocks_list.append(
HighPerfGpuBlock(
in_chs if i == 0 else out_chs,
mid_chs,
out_chs,
layer_num,
residual=False if i == 0 else True,
kernel_size=kernel_size,
light_block=light_block,
use_lab=use_lab,
agg=agg,
drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path,
)
)
self.blocks = nn.Sequential(*blocks_list)
self.grad_checkpointing= False
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x, flatten=False)
else:
x = self.blocks(x)
return x
class ClassifierHead(nn.Module):
def __init__(
self,
in_features: int,
num_classes: int,
pool_type: str = 'avg',
drop_rate: float = 0.,
hidden_size: Optional[int] = 2048,
use_lab: bool = False
):
super(ClassifierHead, self).__init__()
self.num_features = in_features
if pool_type is not None:
if not pool_type:
assert num_classes == 0, 'Classifier head must be removed if pooling is disabled'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
if hidden_size is not None:
self.num_features = hidden_size
last_conv = nn.Conv2d(
in_features,
hidden_size,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
act = nn.ReLU()
if use_lab:
lab = LearnableAffineBlock()
self.last_conv = nn.Sequential(last_conv, act, lab)
else:
self.last_conv = nn.Sequential(last_conv, act)
else:
self.last_conv = nn.Identity()
self.dropout = nn.Dropout(drop_rate)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None:
if not pool_type:
assert num_classes == 0, 'Classifier head must be removed if pooling is disabled'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.last_conv(x)
x = self.dropout(x)
x = self.flatten(x)
if pre_logits:
return x
x = self.fc(x)
return x
class HighPerfGpuNet(nn.Module):
def __init__(
self,
cfg: Dict,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
head_hidden_size: Optional[int] = 2048,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
use_lab: bool = False,
**kwargs,
):
super(HighPerfGpuNet, self).__init__()
stem_type = cfg["stem_type"]
stem_chs = cfg["stem_chs"]
stages_cfg = [cfg["stage1"], cfg["stage2"], cfg["stage3"], cfg["stage4"]]
self.num_classes = num_classes
self.drop_rate = drop_rate
self.use_lab = use_lab
assert stem_type in ['v1', 'v2']
if stem_type == 'v2':
self.stem = StemV2(
in_chs=in_chans,
mid_chs=stem_chs[0],
out_chs=stem_chs[1],
use_lab=use_lab)
else:
self.stem = StemV1([in_chans] + stem_chs)
current_stride = 4
stages = []
self.feature_info = []
block_depths = [c[3] for c in stages_cfg]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(block_depths)).split(block_depths)]
for i, stage_config in enumerate(stages_cfg):
in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num = stage_config
stages += [HighPerfGpuStage(
in_chs=in_chs,
mid_chs=mid_chs,
out_chs=out_chs,
block_num=block_num,
layer_num=layer_num,
downsample=downsample,
light_block=light_block,
kernel_size=kernel_size,
use_lab=use_lab,
agg='ese' if stem_type == 'v1' else 'se',
drop_path=dpr[i],
)]
self.num_features = out_chs
if downsample:
current_stride *= 2
self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(
self.num_features,
num_classes=num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
hidden_size=head_hidden_size,
use_lab=use_lab
)
self.head_hidden_size = self.head.num_features
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
return self.stages(x)
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
model_cfgs = dict(
# PP-HGNet
hgnet_tiny={
"stem_type": 'v1',
"stem_chs": [48, 48, 96],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [96, 96, 224, 1, False, False, 3, 5],
"stage2": [224, 128, 448, 1, True, False, 3, 5],
"stage3": [448, 160, 512, 2, True, False, 3, 5],
"stage4": [512, 192, 768, 1, True, False, 3, 5],
},
hgnet_small={
"stem_type": 'v1',
"stem_chs": [64, 64, 128],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [128, 128, 256, 1, False, False, 3, 6],
"stage2": [256, 160, 512, 1, True, False, 3, 6],
"stage3": [512, 192, 768, 2, True, False, 3, 6],
"stage4": [768, 224, 1024, 1, True, False, 3, 6],
},
hgnet_base={
"stem_type": 'v1',
"stem_chs": [96, 96, 160],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [160, 192, 320, 1, False, False, 3, 7],
"stage2": [320, 224, 640, 2, True, False, 3, 7],
"stage3": [640, 256, 960, 3, True, False, 3, 7],
"stage4": [960, 288, 1280, 2, True, False, 3, 7],
},
# PP-HGNetv2
hgnetv2_b0={
"stem_type": 'v2',
"stem_chs": [16, 16],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [16, 16, 64, 1, False, False, 3, 3],
"stage2": [64, 32, 256, 1, True, False, 3, 3],
"stage3": [256, 64, 512, 2, True, True, 5, 3],
"stage4": [512, 128, 1024, 1, True, True, 5, 3],
},
hgnetv2_b1={
"stem_type": 'v2',
"stem_chs": [24, 32],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [32, 32, 64, 1, False, False, 3, 3],
"stage2": [64, 48, 256, 1, True, False, 3, 3],
"stage3": [256, 96, 512, 2, True, True, 5, 3],
"stage4": [512, 192, 1024, 1, True, True, 5, 3],
},
hgnetv2_b2={
"stem_type": 'v2',
"stem_chs": [24, 32],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [32, 32, 96, 1, False, False, 3, 4],
"stage2": [96, 64, 384, 1, True, False, 3, 4],
"stage3": [384, 128, 768, 3, True, True, 5, 4],
"stage4": [768, 256, 1536, 1, True, True, 5, 4],
},
hgnetv2_b3={
"stem_type": 'v2',
"stem_chs": [24, 32],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [32, 32, 128, 1, False, False, 3, 5],
"stage2": [128, 64, 512, 1, True, False, 3, 5],
"stage3": [512, 128, 1024, 3, True, True, 5, 5],
"stage4": [1024, 256, 2048, 1, True, True, 5, 5],
},
hgnetv2_b4={
"stem_type": 'v2',
"stem_chs": [32, 48],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [48, 48, 128, 1, False, False, 3, 6],
"stage2": [128, 96, 512, 1, True, False, 3, 6],
"stage3": [512, 192, 1024, 3, True, True, 5, 6],
"stage4": [1024, 384, 2048, 1, True, True, 5, 6],
},
hgnetv2_b5={
"stem_type": 'v2',
"stem_chs": [32, 64],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [64, 64, 128, 1, False, False, 3, 6],
"stage2": [128, 128, 512, 2, True, False, 3, 6],
"stage3": [512, 256, 1024, 5, True, True, 5, 6],
"stage4": [1024, 512, 2048, 2, True, True, 5, 6],
},
hgnetv2_b6={
"stem_type": 'v2',
"stem_chs": [48, 96],
# in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
"stage1": [96, 96, 192, 2, False, False, 3, 6],
"stage2": [192, 192, 512, 3, True, False, 3, 6],
"stage3": [512, 384, 1024, 6, True, True, 5, 6],
"stage4": [1024, 768, 2048, 3, True, True, 5, 6],
},
)
def _create_hgnet(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
return build_model_with_cfg(
HighPerfGpuNet,
variant,
pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.965, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'classifier': 'head.fc', 'first_conv': 'stem.stem1.conv',
'test_crop_pct': 1.0, 'test_input_size': (3, 288, 288),
**kwargs,
}
default_cfgs = generate_default_cfgs({
'hgnet_tiny.paddle_in1k': _cfg(
first_conv='stem.stem.0.conv',
hf_hub_id='timm/'),
'hgnet_tiny.ssld_in1k': _cfg(
first_conv='stem.stem.0.conv',
hf_hub_id='timm/'),
'hgnet_small.paddle_in1k': _cfg(
first_conv='stem.stem.0.conv',
hf_hub_id='timm/'),
'hgnet_small.ssld_in1k': _cfg(
first_conv='stem.stem.0.conv',
hf_hub_id='timm/'),
'hgnet_base.ssld_in1k': _cfg(
first_conv='stem.stem.0.conv',
hf_hub_id='timm/'),
'hgnetv2_b0.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b0.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b1.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b1.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b2.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b2.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b3.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b3.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b4.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b4.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b5.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b5.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b6.ssld_stage2_ft_in1k': _cfg(
hf_hub_id='timm/'),
'hgnetv2_b6.ssld_stage1_in22k_in1k': _cfg(
hf_hub_id='timm/'),
})
@register_model
def hgnet_tiny(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnet_tiny', pretrained=pretrained, **kwargs)
@register_model
def hgnet_small(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnet_small', pretrained=pretrained, **kwargs)
@register_model
def hgnet_base(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnet_base', pretrained=pretrained, **kwargs)
@register_model
def hgnetv2_b0(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b0', pretrained=pretrained, use_lab=True, **kwargs)
@register_model
def hgnetv2_b1(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b1', pretrained=pretrained, use_lab=True, **kwargs)
@register_model
def hgnetv2_b2(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b2', pretrained=pretrained, use_lab=True, **kwargs)
@register_model
def hgnetv2_b3(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b3', pretrained=pretrained, use_lab=True, **kwargs)
@register_model
def hgnetv2_b4(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b4', pretrained=pretrained, **kwargs)
@register_model
def hgnetv2_b5(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b5', pretrained=pretrained, **kwargs)
@register_model
def hgnetv2_b6(pretrained=False, **kwargs) -> HighPerfGpuNet:
return _create_hgnet('hgnetv2_b6', pretrained=pretrained, **kwargs)
| pytorch-image-models/timm/models/hgnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/hgnet.py",
"repo_id": "pytorch-image-models",
"token_count": 13177
} | 201 |
""" Multi-Scale Vision Transformer v2
@inproceedings{li2021improved,
title={MViTv2: Improved multiscale vision transformers for classification and detection},
author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph},
booktitle={CVPR},
year={2022}
}
Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit
Original copyright below.
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved. All Rights Reserved.
import operator
from collections import OrderedDict
from dataclasses import dataclass
from functools import partial, reduce
from typing import Union, List, Tuple, Optional
import torch
import torch.utils.checkpoint as checkpoint
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._features_fx import register_notrace_function
from ._registry import register_model, register_model_deprecations, generate_default_cfgs
__all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class MultiScaleVitCfg:
depths: Tuple[int, ...] = (2, 3, 16, 3)
embed_dim: Union[int, Tuple[int, ...]] = 96
num_heads: Union[int, Tuple[int, ...]] = 1
mlp_ratio: float = 4.
pool_first: bool = False
expand_attn: bool = True
qkv_bias: bool = True
use_cls_token: bool = False
use_abs_pos: bool = False
residual_pooling: bool = True
mode: str = 'conv'
kernel_qkv: Tuple[int, int] = (3, 3)
stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2))
stride_kv: Optional[Tuple[Tuple[int, int]]] = None
stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4)
patch_kernel: Tuple[int, int] = (7, 7)
patch_stride: Tuple[int, int] = (4, 4)
patch_padding: Tuple[int, int] = (3, 3)
pool_type: str = 'max'
rel_pos_type: str = 'spatial'
act_layer: Union[str, Tuple[str, str]] = 'gelu'
norm_layer: Union[str, Tuple[str, str]] = 'layernorm'
norm_eps: float = 1e-6
def __post_init__(self):
num_stages = len(self.depths)
if not isinstance(self.embed_dim, (tuple, list)):
self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages))
assert len(self.embed_dim) == num_stages
if not isinstance(self.num_heads, (tuple, list)):
self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages))
assert len(self.num_heads) == num_stages
if self.stride_kv_adaptive is not None and self.stride_kv is None:
_stride_kv = self.stride_kv_adaptive
pool_kv_stride = []
for i in range(num_stages):
if min(self.stride_q[i]) > 1:
_stride_kv = [
max(_stride_kv[d] // self.stride_q[i][d], 1)
for d in range(len(_stride_kv))
]
pool_kv_stride.append(tuple(_stride_kv))
self.stride_kv = tuple(pool_kv_stride)
def prod(iterable):
return reduce(operator.mul, iterable, 1)
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(
self,
dim_in=3,
dim_out=768,
kernel=(7, 7),
stride=(4, 4),
padding=(3, 3),
):
super().__init__()
self.proj = nn.Conv2d(
dim_in,
dim_out,
kernel_size=kernel,
stride=stride,
padding=padding,
)
def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
x = self.proj(x)
# B C H W -> B HW C
return x.flatten(2).transpose(1, 2), x.shape[-2:]
@register_notrace_function
def reshape_pre_pool(
x,
feat_size: List[int],
has_cls_token: bool = True
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
H, W = feat_size
if has_cls_token:
cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :]
else:
cls_tok = None
x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous()
return x, cls_tok
@register_notrace_function
def reshape_post_pool(
x,
num_heads: int,
cls_tok: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, List[int]]:
feat_size = [x.shape[2], x.shape[3]]
L_pooled = x.shape[2] * x.shape[3]
x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3)
if cls_tok is not None:
x = torch.cat((cls_tok, x), dim=2)
return x, feat_size
@register_notrace_function
def cal_rel_pos_type(
attn: torch.Tensor,
q: torch.Tensor,
has_cls_token: bool,
q_size: List[int],
k_size: List[int],
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
):
"""
Spatial Relative Positional Embeddings.
"""
sp_idx = 1 if has_cls_token else 0
q_h, q_w = q_size
k_h, k_w = k_size
# Scale up rel pos if shapes for q and k are different.
q_h_ratio = max(k_h / q_h, 1.0)
k_h_ratio = max(q_h / k_h, 1.0)
dist_h = (
torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio -
torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio
)
dist_h += (k_h - 1) * k_h_ratio
q_w_ratio = max(k_w / q_w, 1.0)
k_w_ratio = max(q_w / k_w, 1.0)
dist_w = (
torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio -
torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio
)
dist_w += (k_w - 1) * k_w_ratio
rel_h = rel_pos_h[dist_h.long()]
rel_w = rel_pos_w[dist_w.long()]
B, n_head, q_N, dim = q.shape
r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim)
rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, rel_h)
rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, rel_w)
attn[:, :, sp_idx:, sp_idx:] = (
attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w)
+ rel_h.unsqueeze(-1)
+ rel_w.unsqueeze(-2)
).view(B, -1, q_h * q_w, k_h * k_w)
return attn
class MultiScaleAttentionPoolFirst(nn.Module):
def __init__(
self,
dim,
dim_out,
feat_size,
num_heads=8,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.head_dim = dim_out // num_heads
self.scale = self.head_dim ** -0.5
self.has_cls_token = has_cls_token
padding_q = tuple([int(q // 2) for q in kernel_q])
padding_kv = tuple([int(kv // 2) for kv in kernel_kv])
self.q = nn.Linear(dim, dim_out, bias=qkv_bias)
self.k = nn.Linear(dim, dim_out, bias=qkv_bias)
self.v = nn.Linear(dim, dim_out, bias=qkv_bias)
self.proj = nn.Linear(dim_out, dim_out)
# Skip pooling with kernel and stride size of (1, 1, 1).
if prod(kernel_q) == 1 and prod(stride_q) == 1:
kernel_q = None
if prod(kernel_kv) == 1 and prod(stride_kv) == 1:
kernel_kv = None
self.mode = mode
self.unshared = mode == 'conv_unshared'
self.pool_q, self.pool_k, self.pool_v = None, None, None
self.norm_q, self.norm_k, self.norm_v = None, None, None
if mode in ("avg", "max"):
pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d
if kernel_q:
self.pool_q = pool_op(kernel_q, stride_q, padding_q)
if kernel_kv:
self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv)
self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv)
elif mode == "conv" or mode == "conv_unshared":
dim_conv = dim // num_heads if mode == "conv" else dim
if kernel_q:
self.pool_q = nn.Conv2d(
dim_conv,
dim_conv,
kernel_q,
stride=stride_q,
padding=padding_q,
groups=dim_conv,
bias=False,
)
self.norm_q = norm_layer(dim_conv)
if kernel_kv:
self.pool_k = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_k = norm_layer(dim_conv)
self.pool_v = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_v = norm_layer(dim_conv)
else:
raise NotImplementedError(f"Unsupported model {mode}")
# relative pos embedding
self.rel_pos_type = rel_pos_type
if self.rel_pos_type == 'spatial':
assert feat_size[0] == feat_size[1]
size = feat_size[0]
q_size = size // stride_q[1] if len(stride_q) > 0 else size
kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size
rel_sp_dim = 2 * max(q_size, kv_size) - 1
self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
trunc_normal_tf_(self.rel_pos_h, std=0.02)
trunc_normal_tf_(self.rel_pos_w, std=0.02)
self.residual_pooling = residual_pooling
def forward(self, x, feat_size: List[int]):
B, N, _ = x.shape
fold_dim = 1 if self.unshared else self.num_heads
x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3)
q = k = v = x
if self.pool_q is not None:
q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token)
q = self.pool_q(q)
q, q_size = reshape_post_pool(q, self.num_heads, q_tok)
else:
q_size = feat_size
if self.norm_q is not None:
q = self.norm_q(q)
if self.pool_k is not None:
k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token)
k = self.pool_k(k)
k, k_size = reshape_post_pool(k, self.num_heads, k_tok)
else:
k_size = feat_size
if self.norm_k is not None:
k = self.norm_k(k)
if self.pool_v is not None:
v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token)
v = self.pool_v(v)
v, v_size = reshape_post_pool(v, self.num_heads, v_tok)
else:
v_size = feat_size
if self.norm_v is not None:
v = self.norm_v(v)
q_N = q_size[0] * q_size[1] + int(self.has_cls_token)
q = q.transpose(1, 2).reshape(B, q_N, -1)
q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2)
k_N = k_size[0] * k_size[1] + int(self.has_cls_token)
k = k.transpose(1, 2).reshape(B, k_N, -1)
k = self.k(k).reshape(B, k_N, self.num_heads, -1)
v_N = v_size[0] * v_size[1] + int(self.has_cls_token)
v = v.transpose(1, 2).reshape(B, v_N, -1)
v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2)
attn = (q * self.scale) @ k
if self.rel_pos_type == 'spatial':
attn = cal_rel_pos_type(
attn,
q,
self.has_cls_token,
q_size,
k_size,
self.rel_pos_h,
self.rel_pos_w,
)
attn = attn.softmax(dim=-1)
x = attn @ v
if self.residual_pooling:
x = x + q
x = x.transpose(1, 2).reshape(B, -1, self.dim_out)
x = self.proj(x)
return x, q_size
class MultiScaleAttention(nn.Module):
def __init__(
self,
dim,
dim_out,
feat_size,
num_heads=8,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.head_dim = dim_out // num_heads
self.scale = self.head_dim ** -0.5
self.has_cls_token = has_cls_token
padding_q = tuple([int(q // 2) for q in kernel_q])
padding_kv = tuple([int(kv // 2) for kv in kernel_kv])
self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias)
self.proj = nn.Linear(dim_out, dim_out)
# Skip pooling with kernel and stride size of (1, 1, 1).
if prod(kernel_q) == 1 and prod(stride_q) == 1:
kernel_q = None
if prod(kernel_kv) == 1 and prod(stride_kv) == 1:
kernel_kv = None
self.mode = mode
self.unshared = mode == 'conv_unshared'
self.norm_q, self.norm_k, self.norm_v = None, None, None
self.pool_q, self.pool_k, self.pool_v = None, None, None
if mode in ("avg", "max"):
pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d
if kernel_q:
self.pool_q = pool_op(kernel_q, stride_q, padding_q)
if kernel_kv:
self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv)
self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv)
elif mode == "conv" or mode == "conv_unshared":
dim_conv = dim_out // num_heads if mode == "conv" else dim_out
if kernel_q:
self.pool_q = nn.Conv2d(
dim_conv,
dim_conv,
kernel_q,
stride=stride_q,
padding=padding_q,
groups=dim_conv,
bias=False,
)
self.norm_q = norm_layer(dim_conv)
if kernel_kv:
self.pool_k = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_k = norm_layer(dim_conv)
self.pool_v = nn.Conv2d(
dim_conv,
dim_conv,
kernel_kv,
stride=stride_kv,
padding=padding_kv,
groups=dim_conv,
bias=False,
)
self.norm_v = norm_layer(dim_conv)
else:
raise NotImplementedError(f"Unsupported model {mode}")
# relative pos embedding
self.rel_pos_type = rel_pos_type
if self.rel_pos_type == 'spatial':
assert feat_size[0] == feat_size[1]
size = feat_size[0]
q_size = size // stride_q[1] if len(stride_q) > 0 else size
kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size
rel_sp_dim = 2 * max(q_size, kv_size) - 1
self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim))
trunc_normal_tf_(self.rel_pos_h, std=0.02)
trunc_normal_tf_(self.rel_pos_w, std=0.02)
self.residual_pooling = residual_pooling
def forward(self, x, feat_size: List[int]):
B, N, _ = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(dim=0)
if self.pool_q is not None:
q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token)
q = self.pool_q(q)
q, q_size = reshape_post_pool(q, self.num_heads, q_tok)
else:
q_size = feat_size
if self.norm_q is not None:
q = self.norm_q(q)
if self.pool_k is not None:
k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token)
k = self.pool_k(k)
k, k_size = reshape_post_pool(k, self.num_heads, k_tok)
else:
k_size = feat_size
if self.norm_k is not None:
k = self.norm_k(k)
if self.pool_v is not None:
v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token)
v = self.pool_v(v)
v, _ = reshape_post_pool(v, self.num_heads, v_tok)
if self.norm_v is not None:
v = self.norm_v(v)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.rel_pos_type == 'spatial':
attn = cal_rel_pos_type(
attn,
q,
self.has_cls_token,
q_size,
k_size,
self.rel_pos_h,
self.rel_pos_w,
)
attn = attn.softmax(dim=-1)
x = attn @ v
if self.residual_pooling:
x = x + q
x = x.transpose(1, 2).reshape(B, -1, self.dim_out)
x = self.proj(x)
return x, q_size
class MultiScaleBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
num_heads,
feat_size,
mlp_ratio=4.0,
qkv_bias=True,
drop_path=0.0,
norm_layer=nn.LayerNorm,
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
mode="conv",
has_cls_token=True,
expand_attn=False,
pool_first=False,
rel_pos_type='spatial',
residual_pooling=True,
):
super().__init__()
proj_needed = dim != dim_out
self.dim = dim
self.dim_out = dim_out
self.has_cls_token = has_cls_token
self.norm1 = norm_layer(dim)
self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None
if stride_q and prod(stride_q) > 1:
kernel_skip = [s + 1 if s > 1 else s for s in stride_q]
stride_skip = stride_q
padding_skip = [int(skip // 2) for skip in kernel_skip]
self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip)
else:
self.shortcut_pool_attn = None
att_dim = dim_out if expand_attn else dim
attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention
self.attn = attn_layer(
dim,
att_dim,
num_heads=num_heads,
feat_size=feat_size,
qkv_bias=qkv_bias,
kernel_q=kernel_q,
kernel_kv=kernel_kv,
stride_q=stride_q,
stride_kv=stride_kv,
norm_layer=norm_layer,
has_cls_token=has_cls_token,
mode=mode,
rel_pos_type=rel_pos_type,
residual_pooling=residual_pooling,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(att_dim)
mlp_dim_out = dim_out
self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None
self.mlp = Mlp(
in_features=att_dim,
hidden_features=int(att_dim * mlp_ratio),
out_features=mlp_dim_out,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def _shortcut_pool(self, x, feat_size: List[int]):
if self.shortcut_pool_attn is None:
return x
if self.has_cls_token:
cls_tok, x = x[:, :1, :], x[:, 1:, :]
else:
cls_tok = None
B, L, C = x.shape
H, W = feat_size
x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous()
x = self.shortcut_pool_attn(x)
x = x.reshape(B, C, -1).transpose(1, 2)
if cls_tok is not None:
x = torch.cat((cls_tok, x), dim=1)
return x
def forward(self, x, feat_size: List[int]):
x_norm = self.norm1(x)
# NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj
x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm)
x_shortcut = self._shortcut_pool(x_shortcut, feat_size)
x, feat_size_new = self.attn(x_norm, feat_size)
x = x_shortcut + self.drop_path1(x)
x_norm = self.norm2(x)
x_shortcut = x if self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm)
x = x_shortcut + self.drop_path2(self.mlp(x_norm))
return x, feat_size_new
class MultiScaleVitStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
num_heads,
feat_size,
mlp_ratio=4.0,
qkv_bias=True,
mode="conv",
kernel_q=(1, 1),
kernel_kv=(1, 1),
stride_q=(1, 1),
stride_kv=(1, 1),
has_cls_token=True,
expand_attn=False,
pool_first=False,
rel_pos_type='spatial',
residual_pooling=True,
norm_layer=nn.LayerNorm,
drop_path=0.0,
):
super().__init__()
self.grad_checkpointing = False
self.blocks = nn.ModuleList()
if expand_attn:
out_dims = (dim_out,) * depth
else:
out_dims = (dim,) * (depth - 1) + (dim_out,)
for i in range(depth):
attention_block = MultiScaleBlock(
dim=dim,
dim_out=out_dims[i],
num_heads=num_heads,
feat_size=feat_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
kernel_q=kernel_q,
kernel_kv=kernel_kv,
stride_q=stride_q if i == 0 else (1, 1),
stride_kv=stride_kv,
mode=mode,
has_cls_token=has_cls_token,
pool_first=pool_first,
rel_pos_type=rel_pos_type,
residual_pooling=residual_pooling,
expand_attn=expand_attn,
norm_layer=norm_layer,
drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path,
)
dim = out_dims[i]
self.blocks.append(attention_block)
if i == 0:
feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)])
self.feat_size = feat_size
def forward(self, x, feat_size: List[int]):
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x, feat_size = checkpoint.checkpoint(blk, x, feat_size)
else:
x, feat_size = blk(x, feat_size)
return x, feat_size
class MultiScaleVit(nn.Module):
"""
Improved Multiscale Vision Transformers for Classification and Detection
Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik,
Christoph Feichtenhofer*
https://arxiv.org/abs/2112.01526
Multiscale Vision Transformers
Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik,
Christoph Feichtenhofer*
https://arxiv.org/abs/2104.11227
"""
def __init__(
self,
cfg: MultiScaleVitCfg,
img_size: Tuple[int, int] = (224, 224),
in_chans: int = 3,
global_pool: Optional[str] = None,
num_classes: int = 1000,
drop_path_rate: float = 0.,
drop_rate: float = 0.,
):
super().__init__()
img_size = to_2tuple(img_size)
norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
self.num_classes = num_classes
self.drop_rate = drop_rate
if global_pool is None:
global_pool = 'token' if cfg.use_cls_token else 'avg'
self.global_pool = global_pool
self.depths = tuple(cfg.depths)
self.expand_attn = cfg.expand_attn
embed_dim = cfg.embed_dim[0]
self.patch_embed = PatchEmbed(
dim_in=in_chans,
dim_out=embed_dim,
kernel=cfg.patch_kernel,
stride=cfg.patch_stride,
padding=cfg.patch_padding,
)
patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1])
num_patches = prod(patch_dims)
if cfg.use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.num_prefix_tokens = 1
pos_embed_dim = num_patches + 1
else:
self.num_prefix_tokens = 0
self.cls_token = None
pos_embed_dim = num_patches
if cfg.use_abs_pos:
self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim))
else:
self.pos_embed = None
num_stages = len(cfg.embed_dim)
feat_size = patch_dims
curr_stride = max(cfg.patch_stride)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
self.stages = nn.ModuleList()
self.feature_info = []
for i in range(num_stages):
if cfg.expand_attn:
dim_out = cfg.embed_dim[i]
else:
dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)]
stage = MultiScaleVitStage(
dim=embed_dim,
dim_out=dim_out,
depth=cfg.depths[i],
num_heads=cfg.num_heads[i],
feat_size=feat_size,
mlp_ratio=cfg.mlp_ratio,
qkv_bias=cfg.qkv_bias,
mode=cfg.mode,
pool_first=cfg.pool_first,
expand_attn=cfg.expand_attn,
kernel_q=cfg.kernel_qkv,
kernel_kv=cfg.kernel_qkv,
stride_q=cfg.stride_q[i],
stride_kv=cfg.stride_kv[i],
has_cls_token=cfg.use_cls_token,
rel_pos_type=cfg.rel_pos_type,
residual_pooling=cfg.residual_pooling,
norm_layer=norm_layer,
drop_path=dpr[i],
)
curr_stride *= max(cfg.stride_q[i])
self.feature_info += [dict(module=f'block.{i}', num_chs=dim_out, reduction=curr_stride)]
embed_dim = dim_out
feat_size = stage.feat_size
self.stages.append(stage)
self.num_features = self.head_hidden_size = embed_dim
self.norm = norm_layer(embed_dim)
self.head = nn.Sequential(OrderedDict([
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
if self.pos_embed is not None:
trunc_normal_tf_(self.pos_embed, std=0.02)
if self.cls_token is not None:
trunc_normal_tf_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_tf_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters()
if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Sequential(OrderedDict([
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
Returns:
"""
assert output_fmt in ('NCHW', 'NLC'), 'Output shape must be NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# FIXME slice block/pos_block if < max
# forward pass
x, feat_size = self.patch_embed(x)
B = x.shape[0]
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
for i, stage in enumerate(self.stages):
x, feat_size = stage(x, feat_size)
if i in take_indices:
if norm and i == (len(self.stages) - 1):
x_inter = self.norm(x) # applying final norm last intermediate
else:
x_inter = x
if reshape:
if self.cls_token is not None:
# possible to allow return of class tokens, TBD
x_inter = x_inter[:, 1:]
x_inter = x_inter.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2)
intermediates.append(x_inter)
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# FIXME add stage pruning
# self.stages = self.stages[:max_index] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x, feat_size = self.patch_embed(x)
B, N, C = x.shape
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
for stage in self.stages:
x, feat_size = stage(x, feat_size)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
if self.global_pool == 'avg':
x = x[:, self.num_prefix_tokens:].mean(1)
else:
x = x[:, 0]
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
if 'stages.0.blocks.0.norm1.weight' in state_dict:
# native checkpoint, look for rel_pos interpolations
for k in state_dict.keys():
if 'rel_pos' in k:
rel_pos = state_dict[k]
dest_rel_pos_shape = model.state_dict()[k].shape
if rel_pos.shape[0] != dest_rel_pos_shape[0]:
rel_pos_resized = torch.nn.functional.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=dest_rel_pos_shape[0],
mode="linear",
)
state_dict[k] = rel_pos_resized.reshape(-1, dest_rel_pos_shape[0]).permute(1, 0)
return state_dict
import re
if 'model_state' in state_dict:
state_dict = state_dict['model_state']
depths = getattr(model, 'depths', None)
expand_attn = getattr(model, 'expand_attn', True)
assert depths is not None, 'model requires depth attribute to remap checkpoints'
depth_map = {}
block_idx = 0
for stage_idx, d in enumerate(depths):
depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)})
block_idx += d
out_dict = {}
for k, v in state_dict.items():
k = re.sub(
r'blocks\.(\d+)',
lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}',
k)
if expand_attn:
k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k)
else:
k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k)
if 'head' in k:
k = k.replace('head.projection', 'head.fc')
out_dict[k] = v
return out_dict
model_cfgs = dict(
mvitv2_tiny=MultiScaleVitCfg(
depths=(1, 2, 5, 2),
),
mvitv2_small=MultiScaleVitCfg(
depths=(1, 2, 11, 2),
),
mvitv2_base=MultiScaleVitCfg(
depths=(2, 3, 16, 3),
),
mvitv2_large=MultiScaleVitCfg(
depths=(2, 6, 36, 4),
embed_dim=144,
num_heads=2,
expand_attn=False,
),
mvitv2_small_cls=MultiScaleVitCfg(
depths=(1, 2, 11, 2),
use_cls_token=True,
),
mvitv2_base_cls=MultiScaleVitCfg(
depths=(2, 3, 16, 3),
use_cls_token=True,
),
mvitv2_large_cls=MultiScaleVitCfg(
depths=(2, 6, 36, 4),
embed_dim=144,
num_heads=2,
use_cls_token=True,
expand_attn=True,
),
mvitv2_huge_cls=MultiScaleVitCfg(
depths=(4, 8, 60, 8),
embed_dim=192,
num_heads=3,
use_cls_token=True,
expand_attn=True,
),
)
def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 4)
return build_model_with_cfg(
MultiScaleVit,
variant,
pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
'mvitv2_tiny.fb_in1k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_small.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth',
hf_hub_id='timm/'),
'mvitv2_small_cls': _cfg(url=''),
'mvitv2_base_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
'mvitv2_large_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
'mvitv2_huge_cls.fb_inw21k': _cfg(
url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth',
hf_hub_id='timm/',
num_classes=19168),
})
@register_model
def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs)
@register_model
def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit:
return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs)
| pytorch-image-models/timm/models/mvitv2.py/0 | {
"file_path": "pytorch-image-models/timm/models/mvitv2.py",
"repo_id": "pytorch-image-models",
"token_count": 21273
} | 202 |
"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization.
A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code
at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have
been included here as pretrained models from their original .NPZ checkpoints.
Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and
extra padding support to allow porting of official Hybrid ResNet pretrained weights from
https://github.com/google-research/vision_transformer
Thanks to the Google team for the above two repositories and associated papers:
* Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370
* An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929
* Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020.
"""
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict # pylint: disable=g-importing-member
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \
DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['ResNetV2'] # model_registry will add each entrypoint fn to this
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
Except it puts the stride on 3x3 conv when available.
"""
def __init__(
self,
in_chs,
out_chs=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
act_layer=None,
conv_layer=None,
norm_layer=None,
proj_layer=None,
drop_path_rate=0.,
):
super().__init__()
first_dilation = first_dilation or dilation
conv_layer = conv_layer or StdConv2d
norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
out_chs = out_chs or in_chs
mid_chs = make_divisible(out_chs * bottle_ratio)
if proj_layer is not None:
self.downsample = proj_layer(
in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True,
conv_layer=conv_layer, norm_layer=norm_layer)
else:
self.downsample = None
self.norm1 = norm_layer(in_chs)
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.norm2 = norm_layer(mid_chs)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
self.norm3 = norm_layer(mid_chs)
self.conv3 = conv_layer(mid_chs, out_chs, 1)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
def zero_init_last(self):
nn.init.zeros_(self.conv3.weight)
def forward(self, x):
x_preact = self.norm1(x)
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x_preact)
# residual branch
x = self.conv1(x_preact)
x = self.conv2(self.norm2(x))
x = self.conv3(self.norm3(x))
x = self.drop_path(x)
return x + shortcut
class Bottleneck(nn.Module):
"""Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT.
"""
def __init__(
self,
in_chs,
out_chs=None,
bottle_ratio=0.25,
stride=1,
dilation=1,
first_dilation=None,
groups=1,
act_layer=None,
conv_layer=None,
norm_layer=None,
proj_layer=None,
drop_path_rate=0.,
):
super().__init__()
first_dilation = first_dilation or dilation
act_layer = act_layer or nn.ReLU
conv_layer = conv_layer or StdConv2d
norm_layer = norm_layer or partial(GroupNormAct, num_groups=32)
out_chs = out_chs or in_chs
mid_chs = make_divisible(out_chs * bottle_ratio)
if proj_layer is not None:
self.downsample = proj_layer(
in_chs, out_chs, stride=stride, dilation=dilation, preact=False,
conv_layer=conv_layer, norm_layer=norm_layer)
else:
self.downsample = None
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.norm1 = norm_layer(mid_chs)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
self.norm2 = norm_layer(mid_chs)
self.conv3 = conv_layer(mid_chs, out_chs, 1)
self.norm3 = norm_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.act3 = act_layer(inplace=True)
def zero_init_last(self):
if getattr(self.norm3, 'weight', None) is not None:
nn.init.zeros_(self.norm3.weight)
def forward(self, x):
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x)
# residual
x = self.conv1(x)
x = self.norm1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.drop_path(x)
x = self.act3(x + shortcut)
return x
class DownsampleConv(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
first_dilation=None,
preact=True,
conv_layer=None,
norm_layer=None,
):
super(DownsampleConv, self).__init__()
self.conv = conv_layer(in_chs, out_chs, 1, stride=stride)
self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
def forward(self, x):
return self.norm(self.conv(x))
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=1,
dilation=1,
first_dilation=None,
preact=True,
conv_layer=None,
norm_layer=None,
):
""" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
super(DownsampleAvg, self).__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False)
def forward(self, x):
return self.norm(self.conv(self.pool(x)))
class ResNetStage(nn.Module):
"""ResNet Stage."""
def __init__(
self,
in_chs,
out_chs,
stride,
dilation,
depth,
bottle_ratio=0.25,
groups=1,
avg_down=False,
block_dpr=None,
block_fn=PreActBottleneck,
act_layer=None,
conv_layer=None,
norm_layer=None,
**block_kwargs,
):
super(ResNetStage, self).__init__()
first_dilation = 1 if dilation in (1, 2) else 2
layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer)
proj_layer = DownsampleAvg if avg_down else DownsampleConv
prev_chs = in_chs
self.blocks = nn.Sequential()
for block_idx in range(depth):
drop_path_rate = block_dpr[block_idx] if block_dpr else 0.
stride = stride if block_idx == 0 else 1
self.blocks.add_module(str(block_idx), block_fn(
prev_chs,
out_chs,
stride=stride,
dilation=dilation,
bottle_ratio=bottle_ratio,
groups=groups,
first_dilation=first_dilation,
proj_layer=proj_layer,
drop_path_rate=drop_path_rate,
**layer_kwargs,
**block_kwargs,
))
prev_chs = out_chs
first_dilation = dilation
proj_layer = None
def forward(self, x):
x = self.blocks(x)
return x
def is_stem_deep(stem_type):
return any([s in stem_type for s in ('deep', 'tiered')])
def create_resnetv2_stem(
in_chs,
out_chs=64,
stem_type='',
preact=True,
conv_layer=StdConv2d,
norm_layer=partial(GroupNormAct, num_groups=32),
):
stem = OrderedDict()
assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered')
# NOTE conv padding mode can be changed by overriding the conv_layer def
if is_stem_deep(stem_type):
# A 3 deep 3x3 conv stack as in ResNet V1D models
if 'tiered' in stem_type:
stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py
else:
stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets
stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2)
stem['norm1'] = norm_layer(stem_chs[0])
stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1)
stem['norm2'] = norm_layer(stem_chs[1])
stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1)
if not preact:
stem['norm3'] = norm_layer(out_chs)
else:
# The usual 7x7 stem conv
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
if not preact:
stem['norm'] = norm_layer(out_chs)
if 'fixed' in stem_type:
# 'fixed' SAME padding approximation that is used in BiT models
stem['pad'] = nn.ConstantPad2d(1, 0.)
stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
elif 'same' in stem_type:
# full, input size based 'SAME' padding, used in ViT Hybrid model
stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same')
else:
# the usual PyTorch symmetric padding
stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
return nn.Sequential(stem)
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode.
"""
def __init__(
self,
layers,
channels=(256, 512, 1024, 2048),
num_classes=1000,
in_chans=3,
global_pool='avg',
output_stride=32,
width_factor=1,
stem_chs=64,
stem_type='',
avg_down=False,
preact=True,
act_layer=nn.ReLU,
norm_layer=partial(GroupNormAct, num_groups=32),
conv_layer=StdConv2d,
drop_rate=0.,
drop_path_rate=0.,
zero_init_last=False,
):
"""
Args:
layers (List[int]) : number of layers in each block
channels (List[int]) : number of channels in each block:
num_classes (int): number of classification classes (default 1000)
in_chans (int): number of input (color) channels. (default 3)
global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg')
output_stride (int): output stride of the network, 32, 16, or 8. (default 32)
width_factor (int): channel (width) multiplication factor
stem_chs (int): stem width (default: 64)
stem_type (str): stem type (default: '' == 7x7)
avg_down (bool): average pooling in residual downsampling (default: False)
preact (bool): pre-activiation (default: True)
act_layer (Union[str, nn.Module]): activation layer
norm_layer (Union[str, nn.Module]): normalization layer
conv_layer (nn.Module): convolution module
drop_rate: classifier dropout rate (default: 0.)
drop_path_rate: stochastic depth rate (default: 0.)
zero_init_last: zero-init last weight in residual path (default: False)
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
wf = width_factor
norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer)
act_layer = get_act_layer(act_layer)
self.feature_info = []
stem_chs = make_divisible(stem_chs * wf)
self.stem = create_resnetv2_stem(
in_chans,
stem_chs,
stem_type,
preact,
conv_layer=conv_layer,
norm_layer=norm_layer,
)
stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm'
self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat))
prev_chs = stem_chs
curr_stride = 4
dilation = 1
block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)]
block_fn = PreActBottleneck if preact else Bottleneck
self.stages = nn.Sequential()
for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)):
out_chs = make_divisible(c * wf)
stride = 1 if stage_idx == 0 else 2
if curr_stride >= output_stride:
dilation *= stride
stride = 1
stage = ResNetStage(
prev_chs,
out_chs,
stride=stride,
dilation=dilation,
depth=d,
avg_down=avg_down,
act_layer=act_layer,
conv_layer=conv_layer,
norm_layer=norm_layer,
block_dpr=bdpr,
block_fn=block_fn,
)
prev_chs = out_chs
curr_stride *= stride
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')]
self.stages.add_module(str(stage_idx), stage)
self.num_features = self.head_hidden_size = prev_chs
self.norm = norm_layer(self.num_features) if preact else nn.Identity()
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
use_conv=True,
)
self.init_weights(zero_init_last=zero_init_last)
self.grad_checkpointing = False
@torch.jit.ignore
def init_weights(self, zero_init_last=True):
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix='resnet/'):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x, flatten=True)
else:
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _init_weights(module: nn.Module, name: str = '', zero_init_last=True):
if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif zero_init_last and hasattr(module, 'zero_init_last'):
module.zero_init_last()
@torch.no_grad()
def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'):
import numpy as np
def t2p(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
weights = np.load(checkpoint_path)
stem_conv_w = adapt_input_conv(
model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
model.stem.conv.weight.copy_(stem_conv_w)
model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma']))
model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta']))
if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \
model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]:
model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel']))
model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias']))
for i, (sname, stage) in enumerate(model.stages.named_children()):
for j, (bname, block) in enumerate(stage.blocks.named_children()):
cname = 'standardized_conv2d'
block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/'
block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel']))
block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel']))
block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel']))
block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma']))
block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma']))
block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma']))
block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta']))
block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta']))
block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta']))
if block.downsample is not None:
w = weights[f'{block_prefix}a/proj/{cname}/kernel']
block.downsample.conv.weight.copy_(t2p(w))
def _create_resnetv2(variant, pretrained=False, **kwargs):
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
ResNetV2, variant, pretrained,
feature_cfg=feature_cfg,
**kwargs,
)
def _create_resnetv2_bit(variant, pretrained=False, **kwargs):
return _create_resnetv2(
variant,
pretrained=pretrained,
stem_type='fixed',
conv_layer=partial(StdConv2d, eps=1e-8),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
# Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237
'resnetv2_50x1_bit.goog_distilled_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', custom_load=True),
'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', custom_load=True),
'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True),
# pretrained on imagenet21k, finetuned on imagenet1k
'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True),
'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg(
hf_hub_id='timm/',
input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480?
# trained on imagenet-21k
'resnetv2_50x1_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_50x3_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_101x1_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_101x3_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_152x2_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_152x4_bit.goog_in21k': _cfg(
hf_hub_id='timm/',
num_classes=21843, custom_load=True),
'resnetv2_50.a1h_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_50t.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_101.a1h_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_101d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_152.untrained': _cfg(
interpolation='bicubic'),
'resnetv2_152d.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
'resnetv2_50d_gn.ah_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', first_conv='stem.conv1',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d_evos.ah_in1k': _cfg(
hf_hub_id='timm/',
interpolation='bicubic', first_conv='stem.conv1',
crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0),
'resnetv2_50d_frn.untrained': _cfg(
interpolation='bicubic', first_conv='stem.conv1'),
})
@register_model
def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs)
@register_model
def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs)
@register_model
def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs)
@register_model
def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs)
@register_model
def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs)
@register_model
def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2:
return _create_resnetv2_bit(
'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs)
@register_model
def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='tiered', avg_down=True)
return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d)
return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs))
# Experimental configs (may change / be removed)
@register_model
def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2:
model_args = dict(
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d,
stem_type='deep', avg_down=True)
return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs))
register_model_deprecations(__name__, {
'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k',
'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k',
'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k',
'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k',
'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k',
'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k',
'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k',
'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k',
'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k',
'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k',
'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k',
'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k',
'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k',
'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k',
'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384',
})
| pytorch-image-models/timm/models/resnetv2.py/0 | {
"file_path": "pytorch-image-models/timm/models/resnetv2.py",
"repo_id": "pytorch-image-models",
"token_count": 14715
} | 203 |
""" Hybrid Vision Transformer (ViT) in PyTorch
A PyTorch implement of the Hybrid Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
NOTE These hybrid model definitions depend on code in vision_transformer.py.
They were moved here to keep file sizes sane.
Hacked together by / Copyright 2020, Ross Wightman
"""
import math
from functools import partial
from typing import Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import StdConv2dSame, StdConv2d, ConvNormAct, to_2tuple, to_ntuple, HybridEmbed
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
from .resnet import resnet26d, resnet50d
from .resnetv2 import ResNetV2, create_resnetv2_stem
from .vision_transformer import VisionTransformer
class ConvStem(nn.Sequential):
def __init__(
self,
in_chans: int = 3,
depth: int = 3,
channels: Union[int, Tuple[int, ...]] = 64,
kernel_size: Union[int, Tuple[int, ...]] = 3,
stride: Union[int, Tuple[int, ...]] = (2, 2, 2),
padding: Union[str, int, Tuple[int, ...]] = "",
norm_layer: Type[nn.Module] = nn.BatchNorm2d,
act_layer: Type[nn.Module] = nn.ReLU,
):
super().__init__()
if isinstance(channels, int):
# a default tiered channel strategy
channels = tuple([channels // 2**i for i in range(depth)][::-1])
kernel_size = to_ntuple(depth)(kernel_size)
padding = to_ntuple(depth)(padding)
assert depth == len(stride) == len(kernel_size) == len(channels)
in_chs = in_chans
for i in range(len(channels)):
last_conv = i == len(channels) - 1
self.add_module(f'{i}', ConvNormAct(
in_chs,
channels[i],
kernel_size=kernel_size[i],
stride=stride[i],
padding=padding[i],
bias=last_conv,
apply_norm=not last_conv,
apply_act=not last_conv,
norm_layer=norm_layer,
act_layer=act_layer,
))
in_chs = channels[i]
def _resnetv2(layers=(3, 4, 9), **kwargs):
""" ResNet-V2 backbone helper"""
padding_same = kwargs.get('padding_same', True)
stem_type = 'same' if padding_same else ''
conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8)
if len(layers):
backbone = ResNetV2(
layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3),
preact=False, stem_type=stem_type, conv_layer=conv_layer)
else:
backbone = create_resnetv2_stem(
kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer)
return backbone
def _convert_mobileclip(state_dict, model, prefix='image_encoder.model.'):
out = {}
for k, v in state_dict.items():
if not k.startswith(prefix):
continue
k = k.replace(prefix, '')
k = k.replace('patch_emb.', 'patch_embed.backbone.')
k = k.replace('block.conv', 'conv')
k = k.replace('block.norm', 'bn')
k = k.replace('post_transformer_norm.', 'norm.')
k = k.replace('pre_norm_mha.0', 'norm1')
k = k.replace('pre_norm_mha.1', 'attn')
k = k.replace('pre_norm_ffn.0', 'norm2')
k = k.replace('pre_norm_ffn.1', 'mlp.fc1')
k = k.replace('pre_norm_ffn.4', 'mlp.fc2')
k = k.replace('qkv_proj.', 'qkv.')
k = k.replace('out_proj.', 'proj.')
k = k.replace('transformer.', 'blocks.')
if k == 'pos_embed.pos_embed.pos_embed':
k = 'pos_embed'
v = v.squeeze(0)
if 'classifier.proj' in k:
bias_k = k.replace('classifier.proj', 'head.bias')
k = k.replace('classifier.proj', 'head.weight')
v = v.T
out[bias_k] = torch.zeros(v.shape[0])
out[k] = v
return out
def checkpoint_filter_fn(
state_dict: Dict[str, torch.Tensor],
model: VisionTransformer,
interpolation: str = 'bicubic',
antialias: bool = True,
) -> Dict[str, torch.Tensor]:
from .vision_transformer import checkpoint_filter_fn as _filter_fn
if 'image_encoder.model.patch_emb.0.block.conv.weight' in state_dict:
state_dict = _convert_mobileclip(state_dict, model)
return _filter_fn(state_dict, model, interpolation=interpolation, antialias=antialias)
def _create_vision_transformer_hybrid(variant, backbone, embed_args=None, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
embed_args = embed_args or {}
embed_layer = partial(HybridEmbed, backbone=backbone, **embed_args)
kwargs.setdefault('embed_layer', embed_layer)
kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set
return build_model_with_cfg(
VisionTransformer,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# hybrid in-1k models (weights from official JAX impl where they exist)
'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
first_conv='patch_embed.backbone.conv'),
'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True),
'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
),
'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, custom_load=True),
'vit_base_r26_s32_224.untrained': _cfg(),
'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0),
'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz',
hf_hub_id='timm/',
custom_load=True,
),
'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, custom_load=True,
),
# hybrid in-21k models (weights from official Google JAX impl where they exist)
'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True),
'vit_small_r26_s32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, custom_load=True),
'vit_base_r50_s16_224.orig_in21k': _cfg(
#url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth',
hf_hub_id='timm/',
num_classes=0, crop_pct=0.9),
'vit_large_r50_s32_224.augreg_in21k': _cfg(
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz',
hf_hub_id='timm/',
num_classes=21843, crop_pct=0.9, custom_load=True),
# hybrid models (using timm resnet backbones)
'vit_small_resnet26d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_small_resnet50d_s16_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_base_resnet26d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_base_resnet50d_224.untrained': _cfg(
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'),
'vit_base_mci_224.apple_mclip_lt': _cfg(
hf_hub_id='apple/mobileclip_b_lt_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_blt.pt',
num_classes=512,
mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv',
),
'vit_base_mci_224.apple_mclip': _cfg(
hf_hub_id='apple/mobileclip_b_timm',
url='https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_b.pt',
num_classes=512,
mean=(0., 0., 0.), std=(1., 1., 1.), first_conv='patch_embed.backbone.0.conv',
),
})
@register_model
def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224.
"""
backbone = _resnetv2(layers=(), **kwargs)
model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer_hybrid(
'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384.
"""
backbone = _resnetv2(layers=(), **kwargs)
model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3)
model = _create_vision_transformer_hybrid(
'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-S/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer_hybrid(
'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-S/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=384, depth=12, num_heads=6)
model = _create_vision_transformer_hybrid(
'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R26+ViT-B/S32 hybrid.
"""
backbone = _resnetv2((2, 2, 2, 2), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
"""
backbone = _resnetv2((3, 4, 9), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
backbone = _resnetv2((3, 4, 9), **kwargs)
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-L/S32 hybrid.
"""
backbone = _resnetv2((3, 4, 6, 3), **kwargs)
model_args = dict(embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer_hybrid(
'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer:
""" R50+ViT-L/S32 hybrid.
"""
backbone = _resnetv2((3, 4, 6, 3), **kwargs)
model_args = dict(embed_dim=1024, depth=24, num_heads=16)
model = _create_vision_transformer_hybrid(
'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3)
model = _create_vision_transformer_hybrid(
'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
"""
backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3])
model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3)
model = _create_vision_transformer_hybrid(
'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
"""
backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4])
model_args = dict(embed_dim=768, depth=12, num_heads=12)
model = _create_vision_transformer_hybrid(
'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_base_mci_224(pretrained=False, **kwargs) -> VisionTransformer:
""" Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
"""
backbone = ConvStem(
channels=(768//4, 768//4, 768),
stride=(4, 2, 2),
kernel_size=(4, 2, 2),
padding=0,
in_chans=kwargs.get('in_chans', 3),
act_layer=nn.GELU,
)
model_args = dict(embed_dim=768, depth=12, num_heads=12, no_embed_class=True)
model = _create_vision_transformer_hybrid(
'vit_base_mci_224', backbone=backbone, embed_args=dict(proj=False),
pretrained=pretrained, **dict(model_args, **kwargs)
)
return model
register_model_deprecations(__name__, {
'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k',
'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k',
'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k',
'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k',
'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k',
'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k'
})
| pytorch-image-models/timm/models/vision_transformer_hybrid.py/0 | {
"file_path": "pytorch-image-models/timm/models/vision_transformer_hybrid.py",
"repo_id": "pytorch-image-models",
"token_count": 8273
} | 204 |
""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb
This optimizer code was adapted from the following (starting with latest)
* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py
* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
* https://github.com/cybertronai/pytorch-lamb
Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is
similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX.
In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU.
Original copyrights for above sources are below.
Modifications Copyright 2021 Ross Wightman
"""
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
"""Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its norm. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging (bool, optional): whether apply (1-beta2) to grad when
calculating running averages of gradient. (default: True)
max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0)
trust_clip (bool): enable LAMBC trust ratio clipping (default: False)
always_adapt (boolean, optional): Apply adaptive learning rate to 0.0
weight decay parameter (default: False)
.. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(
self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False):
defaults = dict(
lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay,
grad_averaging=grad_averaging, max_grad_norm=max_grad_norm,
trust_clip=trust_clip, always_adapt=always_adapt)
super().__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
device = self.param_groups[0]['params'][0].device
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
global_grad_norm = torch.zeros(1, device=device)
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
global_grad_norm.add_(grad.pow(2).sum())
global_grad_norm = torch.sqrt(global_grad_norm)
# FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes
# scalar types properly https://github.com/pytorch/pytorch/issues/9190
max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
clip_global_grad_norm = torch.where(
global_grad_norm > max_grad_norm,
global_grad_norm / max_grad_norm,
one_tensor)
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
grad_averaging = 1 if group['grad_averaging'] else 0
beta3 = 1 - beta1 if grad_averaging else 1.0
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
if bias_correction:
bias_correction1 = 1 - beta1 ** group['step']
bias_correction2 = 1 - beta2 ** group['step']
else:
bias_correction1, bias_correction2 = 1.0, 1.0
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.div_(clip_global_grad_norm)
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient valuesa
state['exp_avg'] = torch.zeros_like(p)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
update = (exp_avg / bias_correction1).div_(denom)
weight_decay = group['weight_decay']
if weight_decay != 0:
update.add_(p, alpha=weight_decay)
if weight_decay != 0 or group['always_adapt']:
# Layer-wise LR adaptation. By default, skip adaptation on parameters that are
# excluded from weight decay, unless always_adapt == True, then always enabled.
w_norm = p.norm(2.0)
g_norm = update.norm(2.0)
# FIXME nested where required since logical and/or not working in PT XLA
trust_ratio = torch.where(
w_norm > 0,
torch.where(g_norm > 0, w_norm / g_norm, one_tensor),
one_tensor,
)
if group['trust_clip']:
# LAMBC trust clipping, upper bound fixed at one
trust_ratio = torch.minimum(trust_ratio, one_tensor)
update.mul_(trust_ratio)
p.add_(update, alpha=-group['lr'])
return loss
| pytorch-image-models/timm/optim/lamb.py/0 | {
"file_path": "pytorch-image-models/timm/optim/lamb.py",
"repo_id": "pytorch-image-models",
"token_count": 3768
} | 205 |
""" Plateau Scheduler
Adapts PyTorch plateau scheduler and allows application of noise, warmup.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
from typing import List
from .scheduler import Scheduler
class PlateauLRScheduler(Scheduler):
"""Decay the LR by a factor every time the validation loss plateaus."""
def __init__(
self,
optimizer,
decay_rate=0.1,
patience_t=10,
verbose=True,
threshold=1e-4,
cooldown_t=0,
warmup_t=0,
warmup_lr_init=0,
lr_min=0,
mode='max',
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize=True,
):
super().__init__(
optimizer,
'lr',
noise_range_t=noise_range_t,
noise_type=noise_type,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer,
patience=patience_t,
factor=decay_rate,
verbose=verbose,
threshold=threshold,
cooldown=cooldown_t,
mode=mode,
min_lr=lr_min
)
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
self.restore_lr = None
def state_dict(self):
return {
'best': self.lr_scheduler.best,
'last_epoch': self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
self.lr_scheduler.best = state_dict['best']
if 'last_epoch' in state_dict:
self.lr_scheduler.last_epoch = state_dict['last_epoch']
# override the base class step fn completely
def step(self, epoch, metric=None):
if epoch <= self.warmup_t:
lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
super().update_groups(lrs)
else:
if self.restore_lr is not None:
# restore actual LR from before our last noise perturbation before stepping base
for i, param_group in enumerate(self.optimizer.param_groups):
param_group['lr'] = self.restore_lr[i]
self.restore_lr = None
self.lr_scheduler.step(metric, epoch) # step the base scheduler
if self._is_apply_noise(epoch):
self._apply_noise(epoch)
def step_update(self, num_updates: int, metric: float = None):
return None
def _apply_noise(self, epoch):
noise = self._calculate_noise(epoch)
# apply the noise on top of previous LR, cache the old value so we can restore for normal
# stepping of base scheduler
restore_lr = []
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
restore_lr.append(old_lr)
new_lr = old_lr + old_lr * noise
param_group['lr'] = new_lr
self.restore_lr = restore_lr
def _get_lr(self, t: int) -> List[float]:
assert False, 'should not be called as step is overridden'
| pytorch-image-models/timm/scheduler/plateau_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/plateau_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 1807
} | 206 |
""" Eval metrics and related
Hacked together by / Copyright 2020 Ross Wightman
"""
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = min(max(topk), output.size()[1])
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
| pytorch-image-models/timm/utils/metrics.py/0 | {
"file_path": "pytorch-image-models/timm/utils/metrics.py",
"repo_id": "pytorch-image-models",
"token_count": 374
} | 207 |
# All the tooling for CUDA
FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04 AS cuda-builder
WORKDIR /usr/src/tgi/backends/trtllm
RUN apt update && apt install -y cmake git git-lfs gcc g++ ninja-build libopenmpi-dev python3-dev python3-pip wget
COPY . /usr/src/tgi
RUN chmod +x scripts/install_tensorrt.sh && scripts/install_tensorrt.sh
RUN cmake -G Ninja -B build -DTRT_LIB_DIR=/usr/local/tensorrt/lib -DTRT_INCLUDE_DIR=/usr/local/tensorrt/include .
RUN cmake --build build --parallel -t tgi_trtllm_backend_impl
# All the tooling for Rust
FROM lukemathwalker/cargo-chef:latest-rust-1.79 AS chef
WORKDIR /usr/src
# Include CUDA related libraries and tools to the Rust based image
COPY --from=cuda-builder /usr/local/cuda /usr/local/cuda
COPY --from=cuda-builder /usr/local/tensorrt /usr/local/tensorrt
COPY --from=cuda-builder /usr/src/tgi/backends/trtllm/build /usr/local/tgi/trtllm/build
ENV PATH=/usr/local/cuda/bin:$PATH
ENV LD_LIBRARY_PATH=/usr/local/tensorrt/lib:$LD_LIBRARY_PATH
RUN apt update && apt install -y cmake git gcc g++ ninja-build libopenmpi3
| text-generation-inference/Dockerfile.trtllm/0 | {
"file_path": "text-generation-inference/Dockerfile.trtllm",
"repo_id": "text-generation-inference",
"token_count": 432
} | 208 |
//
// Created by mfuntowicz on 7/11/24.
//
#ifndef TGI_TRTLLM_BACKEND_FFI_H
#define TGI_TRTLLM_BACKEND_FFI_H
#include <cstddef>
#include "backend.h"
namespace huggingface::tgi::backends {
class TensorRtLlmBackendImpl;
}
#include "backends/trtllm/src/lib.rs.h"
namespace huggingface::tgi::backends {
// struct GenerationContext;
class TensorRtLlmBackendImpl : public TensorRtLlmBackend {
public:
/***
*
* @param engineFolder
* @param executorWorker
*/
TensorRtLlmBackendImpl(const std::string_view &engineFolder, const std::string_view &executorWorker);
/***
*
* @return
*/
bool IsReady() const;
/***
*
* @param tokens
* @param topK
* @param topP
* @param temperature
* @param repetition_penalty
* @param frequency_penalty
* @param seed
* @return
*/
[[nodiscard("returned request id should be used to refer to the request's generation result later on")]]
uint64_t
Submit(rust::Slice<const uint32_t> tokens, int32_t topK, float_t topP, float_t temperature,
float_t repetition_penalty, float_t frequency_penalty, uint64_t seed);
/***
*
* @param requestId
* @param ctx
* @param callback
* @return
*/
size_t StreamTokens(
const RequestId requestId,
huggingface::tgi::backends::GenerationContext *ctx,
rust::Fn<void(huggingface::tgi::backends::GenerationContext *,
huggingface::tgi::backends::GenerationStep)> callback);
};
/***
*
* @param engineFolder
* @return
*/
std::unique_ptr<TensorRtLlmBackendImpl> CreateTensorRtLlmBackend(rust::Str engineFolder, rust::Str executorWorker);
}
#endif //TGI_TRTLLM_BACKEND_FFI_H
| text-generation-inference/backends/trtllm/include/ffi.h/0 | {
"file_path": "text-generation-inference/backends/trtllm/include/ffi.h",
"repo_id": "text-generation-inference",
"token_count": 946
} | 209 |
//! Text Generation gRPC client library
use async_trait::async_trait;
use thiserror::Error;
use tonic::transport;
use tonic::Status;
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod grpc_client;
mod sharded_client;
pub use grpc_client::Client;
pub use pb::generate::v3::{
input_chunk::Chunk, Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType,
HealthResponse, Image, InfoResponse, Input, InputChunk, NextTokenChooserParameters, Request,
StoppingCriteriaParameters,
};
pub use sharded_client::ShardedClient;
#[async_trait]
pub trait Health {
/// Check if a generate server is healthy by asking it to allocate a tensor on device
async fn device_health(&self) -> Result<()>;
/// Check if a generate server is healthy by doing a forward pass.
/// EXPENSIVE
async fn model_health(&self) -> Result<()>;
}
#[derive(Debug)]
pub struct ShardInfo {
pub requires_padding: bool,
pub dtype: String,
pub device_type: String,
pub window_size: Option<u32>,
pub speculate: u32,
}
#[derive(Error, Debug, Clone)]
pub enum ClientError {
#[error("Could not connect to Text Generation server: {0}")]
Connection(String),
#[error("Server error: {0}")]
Generation(String),
#[error("Sharded results are empty")]
EmptyResults,
}
impl From<Status> for ClientError {
fn from(err: Status) -> Self {
let err = Self::Generation(err.message().to_string());
tracing::error!("{err}");
err
}
}
impl From<transport::Error> for ClientError {
fn from(err: transport::Error) -> Self {
let err = Self::Connection(err.to_string());
tracing::error!("{err}");
err
}
}
// Small convenience re-wrapping of `Chunk`.
impl From<Chunk> for InputChunk {
fn from(chunk: Chunk) -> Self {
InputChunk { chunk: Some(chunk) }
}
}
static WARMUP_IMAGE_BASE64 :&str = "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAABg2lDQ1BJQ0MgcHJvZmlsZQAAKJF9kT1Iw0AcxV/TSotUROxQxCFDdbKLijjWKhShQqgVWnUwufQLmrQkKS6OgmvBwY/FqoOLs64OroIg+AHi7OCk6CIl/i8ptIjx4Lgf7+497t4BQqvKNDOQADTdMjKppJjLr4rBVwQQwhAERGVm1uckKQ3P8XUPH1/v4jzL+9yfY0AtmAzwicQJVjcs4g3imU2rznmfOMLKskp8Tjxh0AWJH7muuPzGueSwwDMjRjYzTxwhFks9rPQwKxsa8TRxTNV0yhdyLquctzhr1Qbr3JO/MFzQV5a5TnMUKSxiCRJEKGiggiosxGnVSTGRof2kh3/E8UvkUshVASPHAmrQIDt+8D/43a1ZnJp0k8JJoO/Ftj/GgOAu0G7a9vexbbdPAP8zcKV3/bUWMPtJerOrxY6AwW3g4rqrKXvA5Q4QfarLhuxIfppCsQi8n9E35YHhW6B/ze2ts4/TByBLXaVvgINDYLxE2ese7w719vbvmU5/PycecohsjayNAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH6AQIEQMnlTSSjwAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAASSURBVDjLY2AYBaNgFIyCoQsABMQAAeRw1DoAAAAASUVORK5CYII=";
pub type Result<T> = std::result::Result<T, ClientError>;
| text-generation-inference/backends/v3/src/client/mod.rs/0 | {
"file_path": "text-generation-inference/backends/v3/src/client/mod.rs",
"repo_id": "text-generation-inference",
"token_count": 1283
} | 210 |
unit-tests:
python -m pytest --cov=text_generation tests
install:
pip install pip --upgrade
pip install -e .
| text-generation-inference/clients/python/Makefile/0 | {
"file_path": "text-generation-inference/clients/python/Makefile",
"repo_id": "text-generation-inference",
"token_count": 41
} | 211 |
{
"openapi": "3.0.3",
"info": {
"title": "Text Generation Inference",
"description": "Text Generation Webserver",
"contact": {
"name": "Olivier Dehaene"
},
"license": {
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0"
},
"version": "2.2.1-dev0"
},
"paths": {
"/": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`",
"operationId": "compat_generate",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompatGenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateResponse"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/StreamResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/generate": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "generate",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/generate_stream": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate a stream of token using Server-Sent Events",
"operationId": "generate_stream",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Text",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/StreamResponse"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/health": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Health check method",
"operationId": "health",
"responses": {
"200": {
"description": "Everything is working fine"
},
"503": {
"description": "Text generation inference is down",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "unhealthy",
"error_type": "healthcheck"
}
}
}
}
}
}
},
"/info": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Text Generation Inference endpoint info",
"operationId": "get_model_info",
"responses": {
"200": {
"description": "Served model info",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Info"
}
}
}
}
}
}
},
"/metrics": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Prometheus metrics scrape endpoint",
"operationId": "metrics",
"responses": {
"200": {
"description": "Prometheus Metrics",
"content": {
"text/plain": {
"schema": {
"type": "string"
}
}
}
}
}
}
},
"/tokenize": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Tokenize inputs",
"operationId": "tokenize",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/GenerateRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Tokenized ids",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenizeResponse"
}
}
}
},
"404": {
"description": "No tokenizer found",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "No fast tokenizer available"
}
}
}
}
}
}
},
"/v1/chat/completions": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "chat_completions",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Chat Completion",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ChatCompletion"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/ChatCompletionChunk"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/v1/completions": {
"post": {
"tags": [
"Text Generation Inference"
],
"summary": "Generate tokens",
"operationId": "completions",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompletionRequest"
}
}
},
"required": true
},
"responses": {
"200": {
"description": "Generated Chat Completion",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CompletionFinal"
}
},
"text/event-stream": {
"schema": {
"$ref": "#/components/schemas/Chunk"
}
}
}
},
"422": {
"description": "Input validation error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Input validation error"
}
}
}
},
"424": {
"description": "Generation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Request failed during generation"
}
}
}
},
"429": {
"description": "Model is overloaded",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Model is overloaded"
}
}
}
},
"500": {
"description": "Incomplete generation",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
},
"example": {
"error": "Incomplete generation"
}
}
}
}
}
}
},
"/v1/models": {
"get": {
"tags": [
"Text Generation Inference"
],
"summary": "Get model info",
"operationId": "openai_get_model_info",
"responses": {
"200": {
"description": "Served model info",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ModelInfo"
}
}
}
},
"404": {
"description": "Model not found",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ErrorResponse"
}
}
}
}
}
}
}
},
"components": {
"schemas": {
"BestOfSequence": {
"type": "object",
"required": [
"generated_text",
"finish_reason",
"generated_tokens",
"prefill",
"tokens"
],
"properties": {
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_text": {
"type": "string",
"example": "test"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"prefill": {
"type": "array",
"items": {
"$ref": "#/components/schemas/PrefillToken"
}
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
},
"top_tokens": {
"type": "array",
"items": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
}
},
"ChatCompletion": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices",
"usage"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270835",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
},
"usage": {
"$ref": "#/components/schemas/Usage"
}
}
},
"ChatCompletionChoice": {
"type": "object",
"required": [
"index",
"delta"
],
"properties": {
"delta": {
"$ref": "#/components/schemas/ChatCompletionDelta"
},
"finish_reason": {
"type": "string",
"nullable": true
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"allOf": [
{
"$ref": "#/components/schemas/ChatCompletionLogprobs"
}
],
"nullable": true
}
}
},
"ChatCompletionChunk": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionChoice"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270978",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
}
}
},
"ChatCompletionComplete": {
"type": "object",
"required": [
"index",
"message",
"finish_reason"
],
"properties": {
"finish_reason": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"allOf": [
{
"$ref": "#/components/schemas/ChatCompletionLogprobs"
}
],
"nullable": true
},
"message": {
"$ref": "#/components/schemas/OutputMessage"
}
}
},
"ChatCompletionDelta": {
"oneOf": [
{
"$ref": "#/components/schemas/TextMessage"
},
{
"$ref": "#/components/schemas/ToolCallDelta"
}
]
},
"ChatCompletionLogprob": {
"type": "object",
"required": [
"token",
"logprob",
"top_logprobs"
],
"properties": {
"logprob": {
"type": "number",
"format": "float"
},
"token": {
"type": "string"
},
"top_logprobs": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionTopLogprob"
}
}
}
},
"ChatCompletionLogprobs": {
"type": "object",
"required": [
"content"
],
"properties": {
"content": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ChatCompletionLogprob"
}
}
}
},
"ChatCompletionTopLogprob": {
"type": "object",
"required": [
"token",
"logprob"
],
"properties": {
"logprob": {
"type": "number",
"format": "float"
},
"token": {
"type": "string"
}
}
},
"ChatRequest": {
"type": "object",
"required": [
"messages"
],
"properties": {
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"example": "1.0",
"nullable": true
},
"guideline": {
"type": "string",
"description": "A guideline to be used in the chat_template",
"default": "null",
"example": "null",
"nullable": true
},
"logit_bias": {
"type": "array",
"items": {
"type": "number",
"format": "float"
},
"description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.",
"nullable": true
},
"logprobs": {
"type": "boolean",
"description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.",
"example": "false",
"nullable": true
},
"max_tokens": {
"type": "integer",
"format": "int32",
"description": "The maximum number of tokens that can be generated in the chat completion.",
"example": "32",
"nullable": true,
"minimum": 0
},
"messages": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
"description": "A list of messages comprising the conversation so far.",
"example": "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]"
},
"model": {
"type": "string",
"description": "[UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
"example": "mistralai/Mistral-7B-Instruct-v0.2",
"nullable": true
},
"n": {
"type": "integer",
"format": "int32",
"description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.",
"example": "2",
"nullable": true,
"minimum": 0
},
"presence_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics",
"example": 0.1,
"nullable": true
},
"response_format": {
"allOf": [
{
"$ref": "#/components/schemas/GrammarType"
}
],
"default": "null",
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Up to 4 sequences where the API will stop generating further tokens.",
"example": "null",
"nullable": true
},
"stream": {
"type": "boolean"
},
"temperature": {
"type": "number",
"format": "float",
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.",
"example": 1.0,
"nullable": true
},
"tool_choice": {
"allOf": [
{
"$ref": "#/components/schemas/ToolChoice"
}
],
"nullable": true
},
"tool_prompt": {
"type": "string",
"description": "A prompt to be appended before the tools",
"example": "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.",
"nullable": true
},
"tools": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Tool"
},
"description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of\nfunctions the model may generate JSON inputs for.",
"example": "null",
"nullable": true
},
"top_logprobs": {
"type": "integer",
"format": "int32",
"description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.",
"example": "5",
"nullable": true,
"minimum": 0
},
"top_p": {
"type": "number",
"format": "float",
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
"example": 0.95,
"nullable": true
}
}
},
"Chunk": {
"type": "object",
"required": [
"id",
"created",
"choices",
"model",
"system_fingerprint"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/CompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string"
},
"system_fingerprint": {
"type": "string"
}
}
},
"CompatGenerateRequest": {
"type": "object",
"required": [
"inputs"
],
"properties": {
"inputs": {
"type": "string",
"example": "My name is Olivier and I"
},
"parameters": {
"$ref": "#/components/schemas/GenerateParameters"
},
"stream": {
"type": "boolean",
"default": "false"
}
}
},
"Completion": {
"oneOf": [
{
"allOf": [
{
"$ref": "#/components/schemas/Chunk"
},
{
"type": "object",
"required": [
"object"
],
"properties": {
"object": {
"type": "string",
"enum": [
"text_completion"
]
}
}
}
]
},
{
"allOf": [
{
"$ref": "#/components/schemas/CompletionFinal"
},
{
"type": "object",
"required": [
"object"
],
"properties": {
"object": {
"type": "string",
"enum": [
"text_completion"
]
}
}
}
]
}
],
"discriminator": {
"propertyName": "object"
}
},
"CompletionComplete": {
"type": "object",
"required": [
"index",
"text",
"finish_reason"
],
"properties": {
"finish_reason": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"logprobs": {
"type": "array",
"items": {
"type": "number",
"format": "float"
},
"nullable": true
},
"text": {
"type": "string"
}
}
},
"CompletionFinal": {
"type": "object",
"required": [
"id",
"created",
"model",
"system_fingerprint",
"choices",
"usage"
],
"properties": {
"choices": {
"type": "array",
"items": {
"$ref": "#/components/schemas/CompletionComplete"
}
},
"created": {
"type": "integer",
"format": "int64",
"example": "1706270835",
"minimum": 0
},
"id": {
"type": "string"
},
"model": {
"type": "string",
"example": "mistralai/Mistral-7B-Instruct-v0.2"
},
"system_fingerprint": {
"type": "string"
},
"usage": {
"$ref": "#/components/schemas/Usage"
}
}
},
"CompletionRequest": {
"type": "object",
"required": [
"prompt"
],
"properties": {
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"example": "1.0",
"nullable": true
},
"max_tokens": {
"type": "integer",
"format": "int32",
"description": "The maximum number of tokens that can be generated in the chat completion.",
"default": "32",
"nullable": true,
"minimum": 0
},
"model": {
"type": "string",
"description": "UNUSED\nID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
"example": "mistralai/Mistral-7B-Instruct-v0.2",
"nullable": true
},
"prompt": {
"$ref": "#/components/schemas/Prompt"
},
"repetition_penalty": {
"type": "number",
"format": "float",
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Up to 4 sequences where the API will stop generating further tokens.",
"example": "null",
"nullable": true
},
"stream": {
"type": "boolean"
},
"suffix": {
"type": "string",
"description": "The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.\nplease see the completion_template field in the model's tokenizer_config.json file for completion template.",
"nullable": true
},
"temperature": {
"type": "number",
"format": "float",
"description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.",
"example": 1.0,
"nullable": true
},
"top_p": {
"type": "number",
"format": "float",
"description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
"example": 0.95,
"nullable": true
}
}
},
"DeltaToolCall": {
"type": "object",
"required": [
"index",
"id",
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/Function"
},
"id": {
"type": "string"
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"type": {
"type": "string"
}
}
},
"Details": {
"type": "object",
"required": [
"finish_reason",
"generated_tokens",
"prefill",
"tokens"
],
"properties": {
"best_of_sequences": {
"type": "array",
"items": {
"$ref": "#/components/schemas/BestOfSequence"
},
"nullable": true
},
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"prefill": {
"type": "array",
"items": {
"$ref": "#/components/schemas/PrefillToken"
}
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
},
"tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
},
"top_tokens": {
"type": "array",
"items": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
}
},
"ErrorResponse": {
"type": "object",
"required": [
"error",
"error_type"
],
"properties": {
"error": {
"type": "string"
},
"error_type": {
"type": "string"
}
}
},
"FinishReason": {
"type": "string",
"enum": [
"length",
"eos_token",
"stop_sequence"
],
"example": "Length"
},
"Function": {
"type": "object",
"required": [
"arguments"
],
"properties": {
"arguments": {
"type": "string"
},
"name": {
"type": "string",
"nullable": true
}
}
},
"FunctionDefinition": {
"type": "object",
"required": [
"name",
"arguments"
],
"properties": {
"arguments": {},
"description": {
"type": "string",
"nullable": true
},
"name": {
"type": "string"
}
}
},
"FunctionName": {
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"type": "string"
}
}
},
"GenerateParameters": {
"type": "object",
"properties": {
"adapter_id": {
"type": "string",
"description": "Lora adapter id",
"default": "null",
"example": "null",
"nullable": true
},
"best_of": {
"type": "integer",
"description": "Generate best_of sequences and return the one if the highest token logprobs.",
"default": "null",
"example": 1,
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"decoder_input_details": {
"type": "boolean",
"description": "Whether to return decoder input token logprobs and ids.",
"default": "false"
},
"details": {
"type": "boolean",
"description": "Whether to return generation details.",
"default": "true"
},
"do_sample": {
"type": "boolean",
"description": "Activate logits sampling.",
"default": "false",
"example": true
},
"frequency_penalty": {
"type": "number",
"format": "float",
"description": "The parameter for frequency penalty. 1.0 means no penalty\nPenalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.",
"default": "null",
"example": 0.1,
"nullable": true,
"exclusiveMinimum": -2
},
"grammar": {
"allOf": [
{
"$ref": "#/components/schemas/GrammarType"
}
],
"default": "null",
"nullable": true
},
"max_new_tokens": {
"type": "integer",
"format": "int32",
"description": "Maximum number of tokens to generate.",
"default": "100",
"example": "20",
"nullable": true,
"minimum": 0
},
"repetition_penalty": {
"type": "number",
"format": "float",
"description": "The parameter for repetition penalty. 1.0 means no penalty.\nSee [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.",
"default": "null",
"example": 1.03,
"nullable": true,
"exclusiveMinimum": 0
},
"return_full_text": {
"type": "boolean",
"description": "Whether to prepend the prompt to the generated text",
"default": "null",
"example": false,
"nullable": true
},
"seed": {
"type": "integer",
"format": "int64",
"description": "Random sampling seed.",
"default": "null",
"example": "null",
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"stop": {
"type": "array",
"items": {
"type": "string"
},
"description": "Stop generating tokens if a member of `stop` is generated.",
"example": [
"photographer"
],
"maxItems": 4
},
"temperature": {
"type": "number",
"format": "float",
"description": "The value used to module the logits distribution.",
"default": "null",
"example": 0.5,
"nullable": true,
"exclusiveMinimum": 0
},
"top_k": {
"type": "integer",
"format": "int32",
"description": "The number of highest probability vocabulary tokens to keep for top-k-filtering.",
"default": "null",
"example": 10,
"nullable": true,
"exclusiveMinimum": 0
},
"top_n_tokens": {
"type": "integer",
"format": "int32",
"description": "The number of highest probability vocabulary tokens to keep for top-n-filtering.",
"default": "null",
"example": 5,
"nullable": true,
"minimum": 0,
"exclusiveMinimum": 0
},
"top_p": {
"type": "number",
"format": "float",
"description": "Top-p value for nucleus sampling.",
"default": "null",
"example": 0.95,
"nullable": true,
"maximum": 1,
"exclusiveMinimum": 0
},
"truncate": {
"type": "integer",
"description": "Truncate inputs tokens to the given size.",
"default": "null",
"example": "null",
"nullable": true,
"minimum": 0
},
"typical_p": {
"type": "number",
"format": "float",
"description": "Typical Decoding mass\nSee [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.",
"default": "null",
"example": 0.95,
"nullable": true,
"maximum": 1,
"exclusiveMinimum": 0
},
"watermark": {
"type": "boolean",
"description": "Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).",
"default": "false",
"example": true
}
}
},
"GenerateRequest": {
"type": "object",
"required": [
"inputs"
],
"properties": {
"inputs": {
"type": "string",
"example": "My name is Olivier and I"
},
"parameters": {
"$ref": "#/components/schemas/GenerateParameters"
}
}
},
"GenerateResponse": {
"type": "object",
"required": [
"generated_text"
],
"properties": {
"details": {
"allOf": [
{
"$ref": "#/components/schemas/Details"
}
],
"nullable": true
},
"generated_text": {
"type": "string",
"example": "test"
}
}
},
"GrammarType": {
"oneOf": [
{
"type": "object",
"required": [
"type",
"value"
],
"properties": {
"type": {
"type": "string",
"enum": [
"json"
]
},
"value": {
"description": "A string that represents a [JSON Schema](https://json-schema.org/).\n\nJSON Schema is a declarative language that allows to annotate JSON documents\nwith types and descriptions."
}
}
},
{
"type": "object",
"required": [
"type",
"value"
],
"properties": {
"type": {
"type": "string",
"enum": [
"regex"
]
},
"value": {
"type": "string"
}
}
}
],
"discriminator": {
"propertyName": "type"
}
},
"Info": {
"type": "object",
"required": [
"model_id",
"max_concurrent_requests",
"max_best_of",
"max_stop_sequences",
"max_input_tokens",
"max_total_tokens",
"validation_workers",
"max_client_batch_size",
"router",
"version"
],
"properties": {
"docker_label": {
"type": "string",
"example": "null",
"nullable": true
},
"max_best_of": {
"type": "integer",
"example": "2",
"minimum": 0
},
"max_client_batch_size": {
"type": "integer",
"example": "32",
"minimum": 0
},
"max_concurrent_requests": {
"type": "integer",
"description": "Router Parameters",
"example": "128",
"minimum": 0
},
"max_input_tokens": {
"type": "integer",
"example": "1024",
"minimum": 0
},
"max_stop_sequences": {
"type": "integer",
"example": "4",
"minimum": 0
},
"max_total_tokens": {
"type": "integer",
"example": "2048",
"minimum": 0
},
"model_id": {
"type": "string",
"description": "Model info",
"example": "bigscience/blomm-560m"
},
"model_pipeline_tag": {
"type": "string",
"example": "text-generation",
"nullable": true
},
"model_sha": {
"type": "string",
"example": "e985a63cdc139290c5f700ff1929f0b5942cced2",
"nullable": true
},
"router": {
"type": "string",
"description": "Router Info",
"example": "text-generation-router"
},
"sha": {
"type": "string",
"example": "null",
"nullable": true
},
"validation_workers": {
"type": "integer",
"example": "2",
"minimum": 0
},
"version": {
"type": "string",
"example": "0.5.0"
}
}
},
"Message": {
"type": "object",
"required": [
"role",
"content"
],
"properties": {
"content": {
"$ref": "#/components/schemas/MessageContent"
},
"name": {
"type": "string",
"example": "\"David\"",
"nullable": true
},
"role": {
"type": "string",
"example": "user"
}
}
},
"MessageChunk": {
"oneOf": [
{
"type": "object",
"required": [
"text",
"type"
],
"properties": {
"text": {
"type": "string"
},
"type": {
"type": "string",
"enum": [
"text"
]
}
}
},
{
"type": "object",
"required": [
"image_url",
"type"
],
"properties": {
"image_url": {
"$ref": "#/components/schemas/Url"
},
"type": {
"type": "string",
"enum": [
"image_url"
]
}
}
}
],
"discriminator": {
"propertyName": "type"
}
},
"MessageContent": {
"oneOf": [
{
"type": "string"
},
{
"type": "array",
"items": {
"$ref": "#/components/schemas/MessageChunk"
}
}
]
},
"ModelInfo": {
"type": "object",
"required": [
"id",
"object",
"created",
"owned_by"
],
"properties": {
"created": {
"type": "integer",
"format": "int64",
"example": 1686935002,
"minimum": 0
},
"id": {
"type": "string",
"example": "gpt2"
},
"object": {
"type": "string",
"example": "model"
},
"owned_by": {
"type": "string",
"example": "openai"
}
}
},
"OutputMessage": {
"oneOf": [
{
"$ref": "#/components/schemas/TextMessage"
},
{
"$ref": "#/components/schemas/ToolCallMessage"
}
]
},
"PrefillToken": {
"type": "object",
"required": [
"id",
"text",
"logprob"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"logprob": {
"type": "number",
"format": "float",
"example": -0.34,
"nullable": true
},
"text": {
"type": "string",
"example": "test"
}
}
},
"Prompt": {
"type": "array",
"items": {
"type": "string"
}
},
"SimpleToken": {
"type": "object",
"required": [
"id",
"text",
"start",
"stop"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"start": {
"type": "integer",
"example": 0,
"minimum": 0
},
"stop": {
"type": "integer",
"example": 2,
"minimum": 0
},
"text": {
"type": "string",
"example": "test"
}
}
},
"StreamDetails": {
"type": "object",
"required": [
"finish_reason",
"generated_tokens",
"input_length"
],
"properties": {
"finish_reason": {
"$ref": "#/components/schemas/FinishReason"
},
"generated_tokens": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"input_length": {
"type": "integer",
"format": "int32",
"example": 1,
"minimum": 0
},
"seed": {
"type": "integer",
"format": "int64",
"example": 42,
"nullable": true,
"minimum": 0
}
}
},
"StreamResponse": {
"type": "object",
"required": [
"index",
"token"
],
"properties": {
"details": {
"allOf": [
{
"$ref": "#/components/schemas/StreamDetails"
}
],
"default": "null",
"nullable": true
},
"generated_text": {
"type": "string",
"default": "null",
"example": "test",
"nullable": true
},
"index": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"token": {
"$ref": "#/components/schemas/Token"
},
"top_tokens": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Token"
}
}
}
},
"TextMessage": {
"type": "object",
"required": [
"role",
"content"
],
"properties": {
"content": {
"type": "string",
"example": "My name is David and I"
},
"role": {
"type": "string",
"example": "user"
}
}
},
"Token": {
"type": "object",
"required": [
"id",
"text",
"logprob",
"special"
],
"properties": {
"id": {
"type": "integer",
"format": "int32",
"example": 0,
"minimum": 0
},
"logprob": {
"type": "number",
"format": "float",
"example": -0.34,
"nullable": true
},
"special": {
"type": "boolean",
"example": "false"
},
"text": {
"type": "string",
"example": "test"
}
}
},
"TokenizeResponse": {
"type": "array",
"items": {
"$ref": "#/components/schemas/SimpleToken"
}
},
"Tool": {
"type": "object",
"required": [
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionDefinition"
},
"type": {
"type": "string",
"example": "function"
}
}
},
"ToolCall": {
"type": "object",
"required": [
"id",
"type",
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionDefinition"
},
"id": {
"type": "string"
},
"type": {
"type": "string"
}
}
},
"ToolCallDelta": {
"type": "object",
"required": [
"role",
"tool_calls"
],
"properties": {
"role": {
"type": "string",
"example": "assistant"
},
"tool_calls": {
"$ref": "#/components/schemas/DeltaToolCall"
}
}
},
"ToolCallMessage": {
"type": "object",
"required": [
"role",
"tool_calls"
],
"properties": {
"role": {
"type": "string",
"example": "assistant"
},
"tool_calls": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ToolCall"
}
}
}
},
"ToolChoice": {
"allOf": [
{
"$ref": "#/components/schemas/ToolType"
}
],
"nullable": true
},
"ToolType": {
"oneOf": [
{
"type": "object",
"default": null,
"nullable": true
},
{
"type": "string"
},
{
"type": "object",
"required": [
"function"
],
"properties": {
"function": {
"$ref": "#/components/schemas/FunctionName"
}
}
},
{
"type": "object",
"default": null,
"nullable": true
}
]
},
"Url": {
"type": "object",
"required": [
"url"
],
"properties": {
"url": {
"type": "string"
}
}
},
"Usage": {
"type": "object",
"required": [
"prompt_tokens",
"completion_tokens",
"total_tokens"
],
"properties": {
"completion_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"prompt_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
},
"total_tokens": {
"type": "integer",
"format": "int32",
"minimum": 0
}
}
}
}
},
"tags": [
{
"name": "Text Generation Inference",
"description": "Hugging Face Text Generation Inference API"
}
]
}
| text-generation-inference/docs/openapi.json/0 | {
"file_path": "text-generation-inference/docs/openapi.json",
"repo_id": "text-generation-inference",
"token_count": 34879
} | 212 |
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"text": " PR for flake8"
}
],
"created": 1713284454,
"id": "",
"model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
"object": "text_completion",
"system_fingerprint": "2.0.1-native",
"usage": {
"completion_tokens": 5,
"prompt_tokens": 6,
"total_tokens": 11
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_single_prompt.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_single_prompt.json",
"repo_id": "text-generation-inference",
"token_count": 203
} | 213 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1247,
"logprob": -5.2421875,
"text": "User"
},
{
"id": 28747,
"logprob": -6.9570312,
"text": ":"
},
{
"id": 32000,
"logprob": -16.234375,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.8828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.015625,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -22.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.2734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.546875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.1953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.2988281,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.7421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.4453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.7207031,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -23.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.0625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.6640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.0917969,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.1328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.3984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.6484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -13.6171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9609375,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.0332031,
"text": "<fake_token_around_image>"
},
{
"id": 12018,
"logprob": -12.078125,
"text": "Write"
},
{
"id": 528,
"logprob": -10.09375,
"text": "me"
},
{
"id": 264,
"logprob": -0.103393555,
"text": "a"
},
{
"id": 2485,
"logprob": -4.5742188,
"text": "short"
},
{
"id": 2838,
"logprob": -0.23815918,
"text": "story"
},
{
"id": 32002,
"logprob": -10.9765625,
"text": "<end_of_utterance>"
},
{
"id": 259,
"logprob": -20.34375,
"text": " "
},
{
"id": 13,
"logprob": -8.53125,
"text": "\n"
},
{
"id": 7226,
"logprob": -10.4765625,
"text": "Ass"
},
{
"id": 11143,
"logprob": -13.6015625,
"text": "istant"
},
{
"id": 28747,
"logprob": -0.008514404,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09289551,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31396484,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051727295,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34448242,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.03237915,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018751621,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07043457,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.00422287,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1247,
"logprob": -5.2382812,
"text": "User"
},
{
"id": 28747,
"logprob": -6.9492188,
"text": ":"
},
{
"id": 32000,
"logprob": -16.234375,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.8828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.015625,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -22.109375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.8046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.2734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.546875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.1953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.2988281,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.4453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.7207031,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -23.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.6640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.0917969,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.1328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.6484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -13.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9609375,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.0332031,
"text": "<fake_token_around_image>"
},
{
"id": 12018,
"logprob": -12.078125,
"text": "Write"
},
{
"id": 528,
"logprob": -10.109375,
"text": "me"
},
{
"id": 264,
"logprob": -0.103515625,
"text": "a"
},
{
"id": 2485,
"logprob": -4.5664062,
"text": "short"
},
{
"id": 2838,
"logprob": -0.23864746,
"text": "story"
},
{
"id": 32002,
"logprob": -10.9609375,
"text": "<end_of_utterance>"
},
{
"id": 259,
"logprob": -20.34375,
"text": " "
},
{
"id": 13,
"logprob": -8.5546875,
"text": "\n"
},
{
"id": 7226,
"logprob": -10.484375,
"text": "Ass"
},
{
"id": 11143,
"logprob": -13.6015625,
"text": "istant"
},
{
"id": 28747,
"logprob": -0.008308411,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018763542,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1247,
"logprob": -5.2382812,
"text": "User"
},
{
"id": 28747,
"logprob": -6.9492188,
"text": ":"
},
{
"id": 32000,
"logprob": -16.234375,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.8828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.015625,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -22.109375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.8046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.2734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.546875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.1953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.2988281,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.4453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.7207031,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -23.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.6640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.0917969,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.1328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.6484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -13.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9609375,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.0332031,
"text": "<fake_token_around_image>"
},
{
"id": 12018,
"logprob": -12.078125,
"text": "Write"
},
{
"id": 528,
"logprob": -10.109375,
"text": "me"
},
{
"id": 264,
"logprob": -0.103515625,
"text": "a"
},
{
"id": 2485,
"logprob": -4.5664062,
"text": "short"
},
{
"id": 2838,
"logprob": -0.23864746,
"text": "story"
},
{
"id": 32002,
"logprob": -10.9609375,
"text": "<end_of_utterance>"
},
{
"id": 259,
"logprob": -20.34375,
"text": " "
},
{
"id": 13,
"logprob": -8.5546875,
"text": "\n"
},
{
"id": 7226,
"logprob": -10.484375,
"text": "Ass"
},
{
"id": 11143,
"logprob": -13.6015625,
"text": "istant"
},
{
"id": 28747,
"logprob": -0.008308411,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018787384,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1,
"logprob": null,
"text": "<s>"
},
{
"id": 1247,
"logprob": -5.2382812,
"text": "User"
},
{
"id": 28747,
"logprob": -6.9492188,
"text": ":"
},
{
"id": 32000,
"logprob": -16.234375,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.8828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -23.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.015625,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -22.109375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.8046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.2734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.859375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.15625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.546875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.1953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.84375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.2988281,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.65625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.953125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.4453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.21875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.359375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.5625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.7207031,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -23.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.078125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.28125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.203125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.03125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.9375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.765625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.6640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.3125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.671875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.96875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.8125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.09375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.34375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.171875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.578125,
"text": "<image>"
},
{
"id": 32000,
"logprob": -3.0917969,
"text": "<fake_token_around_image>"
},
{
"id": 32001,
"logprob": -25.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.6875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.71875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.453125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.796875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.1328125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.90625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.734375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.25,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.5,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.59375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.984375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.53125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.4375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.265625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -22.296875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.75,
"text": "<image>"
},
{
"id": 32001,
"logprob": -14.6484375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.609375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.828125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.015625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.046875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.234375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.140625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.0,
"text": "<image>"
},
{
"id": 32001,
"logprob": -18.78125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.703125,
"text": "<image>"
},
{
"id": 32001,
"logprob": -13.625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.375,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.515625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -21.921875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.640625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.46875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -16.421875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.890625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -17.40625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -20.390625,
"text": "<image>"
},
{
"id": 32001,
"logprob": -19.1875,
"text": "<image>"
},
{
"id": 32001,
"logprob": -15.9609375,
"text": "<image>"
},
{
"id": 32000,
"logprob": -2.0332031,
"text": "<fake_token_around_image>"
},
{
"id": 12018,
"logprob": -12.078125,
"text": "Write"
},
{
"id": 528,
"logprob": -10.109375,
"text": "me"
},
{
"id": 264,
"logprob": -0.103515625,
"text": "a"
},
{
"id": 2485,
"logprob": -4.5664062,
"text": "short"
},
{
"id": 2838,
"logprob": -0.23864746,
"text": "story"
},
{
"id": 32002,
"logprob": -10.9609375,
"text": "<end_of_utterance>"
},
{
"id": 259,
"logprob": -20.34375,
"text": " "
},
{
"id": 13,
"logprob": -8.5546875,
"text": "\n"
},
{
"id": 7226,
"logprob": -10.484375,
"text": "Ass"
},
{
"id": 11143,
"logprob": -13.6015625,
"text": "istant"
},
{
"id": 28747,
"logprob": -0.008308411,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 330,
"logprob": -0.09448242,
"special": false,
"text": " A"
},
{
"id": 13088,
"logprob": -0.6743164,
"special": false,
"text": " chicken"
},
{
"id": 349,
"logprob": -0.31201172,
"special": false,
"text": " is"
},
{
"id": 6398,
"logprob": -0.051635742,
"special": false,
"text": " sitting"
},
{
"id": 356,
"logprob": -0.34033203,
"special": false,
"text": " on"
},
{
"id": 264,
"logprob": -0.1194458,
"special": false,
"text": " a"
},
{
"id": 17972,
"logprob": -0.032562256,
"special": false,
"text": " pile"
},
{
"id": 302,
"logprob": -0.00018763542,
"special": false,
"text": " of"
},
{
"id": 2445,
"logprob": -0.07122803,
"special": false,
"text": " money"
},
{
"id": 28723,
"logprob": -0.0041007996,
"special": false,
"text": "."
}
],
"top_tokens": null
},
"generated_text": " A chicken is sitting on a pile of money."
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_idefics2/test_flash_idefics2_next_load.json",
"repo_id": "text-generation-inference",
"token_count": 101154
} | 214 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": 0,
"tokens": [
{
"id": 16017,
"logprob": 0.0,
"special": false,
"text": " blue"
},
{
"id": 20495,
"logprob": 0.0,
"special": false,
"text": " sky"
},
{
"id": 259,
"logprob": -0.46948242,
"special": false,
"text": " "
},
{
"id": 261,
"logprob": -0.15307617,
"special": false,
"text": ","
},
{
"id": 35622,
"logprob": -0.79589844,
"special": false,
"text": " cloud"
},
{
"id": 263,
"logprob": -1.2958984,
"special": false,
"text": "s"
},
{
"id": 305,
"logprob": 0.0,
"special": false,
"text": " and"
},
{
"id": 35622,
"logprob": -1.2998047,
"special": false,
"text": " cloud"
},
{
"id": 263,
"logprob": 0.0,
"special": false,
"text": "s"
},
{
"id": 1,
"logprob": 0.0,
"special": true,
"text": "</s>"
}
],
"top_tokens": null
},
"generated_text": "Why is the sky blue?blue sky, clouds and clouds"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 911
} | 215 |
import pytest
@pytest.fixture(scope="module")
def bloom_560_handle(launcher):
with launcher("bigscience/bloom-560m") as handle:
yield handle
@pytest.fixture(scope="module")
async def bloom_560(bloom_560_handle):
await bloom_560_handle.health(240)
return bloom_560_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m(bloom_560, response_snapshot):
response = await bloom_560.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
top_p=0.9,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_all_params(bloom_560, response_snapshot):
response = await bloom_560.generate(
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_bloom_560m_load(bloom_560, generate_load, response_snapshot):
responses = await generate_load(
bloom_560,
"Pour déguster un ortolan, il faut tout d'abord",
max_new_tokens=10,
n=4,
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_bloom_560m.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_bloom_560m.py",
"repo_id": "text-generation-inference",
"token_count": 776
} | 216 |
import pytest
@pytest.fixture(scope="module")
def flash_llama_gptq_handle(launcher):
with launcher(
"astronomer/Llama-3-8B-Instruct-GPTQ-4-Bit", num_shard=2, quantize="gptq"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_gptq(flash_llama_gptq_handle):
await flash_llama_gptq_handle.health(300)
return flash_llama_gptq_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq(flash_llama_gptq, response_snapshot):
response = await flash_llama_gptq.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_all_params(flash_llama_gptq, response_snapshot):
response = await flash_llama_gptq.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_gptq_load(
flash_llama_gptq, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_gptq, "Test request", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama_gptq.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama_gptq.py",
"repo_id": "text-generation-inference",
"token_count": 769
} | 217 |
import pytest
import base64
@pytest.fixture(scope="module")
def idefics_handle(launcher):
with launcher(
"HuggingFaceM4/idefics-9b-instruct", num_shard=2, dtype="float16"
) as handle:
yield handle
@pytest.fixture(scope="module")
async def idefics(idefics_handle):
await idefics_handle.health(300)
return idefics_handle.client
# TODO fix the server parsser to count inline image tokens correctly
def get_chicken():
with open("integration-tests/images/chicken_on_money.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return f"data:image/png;base64,{encoded_string.decode('utf-8')}"
def get_cow_beach():
with open("integration-tests/images/cow_beach.png", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return f"data:image/png;base64,{encoded_string.decode('utf-8')}"
@pytest.mark.asyncio
async def test_idefics(idefics, response_snapshot):
chicken = get_chicken()
response = await idefics.generate(
f"User:Can you tell me a very short story based on the image?",
max_new_tokens=10,
decoder_input_details=True,
)
assert response.details.generated_tokens == 10
assert (
response.generated_text == " \nAssistant: A rooster stands"
), f"{repr(response.generated_text)}"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_idefics_two_images(idefics, response_snapshot):
chicken = get_chicken()
cow_beach = get_cow_beach()
response = await idefics.generate(
f"User:Where are the cow and chicken?<end_of_utterance> \nAssistant:",
max_new_tokens=20,
)
assert (
response.generated_text == " The cow and chicken are on a beach."
), f"{repr(response.generated_text)}"
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_idefics_load(idefics, generate_load, response_snapshot):
chicken = get_chicken()
responses = await generate_load(
idefics,
f"User:Can you tell me a very short story based on the image?",
max_new_tokens=10,
n=4,
)
generated_texts = [r.generated_text for r in responses]
assert generated_texts[0] == " \nAssistant: A rooster stands"
assert len(generated_texts) == 4
assert generated_texts, all(
[text == generated_texts[0] for text in generated_texts]
)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_idefics.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_idefics.py",
"repo_id": "text-generation-inference",
"token_count": 1030
} | 218 |
[package]
name = "text-generation-launcher"
description = "Text Generation Launcher"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
clap = { version = "4.4.5", features = ["derive", "env"] }
ctrlc = { version = "3.4.1", features = ["termination"] }
hf-hub = "0.3.2"
nix = { version = "0.28.0", features = ["signal"] }
once_cell = "1.19.0"
serde = { version = "1.0.188", features = ["derive"] }
serde_json = "1.0.107"
thiserror = "1.0.59"
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] }
[dev-dependencies]
float_eq = "1.0.1"
reqwest = { version = "0.11.20", features = ["blocking", "json"] }
[build-dependencies]
vergen = { version = "8.2.5", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] }
| text-generation-inference/launcher/Cargo.toml/0 | {
"file_path": "text-generation-inference/launcher/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 322
} | 219 |
include Makefile-flash-att
include Makefile-flash-att-v2
include Makefile-vllm
include Makefile-awq
include Makefile-eetq
include Makefile-selective-scan
include Makefile-lorax-punica
include Makefile-fbgemm
include Makefile-exllamav2
include Makefile-flashinfer
unit-tests:
pytest -s -vv -m "not private" tests
gen-server:
# Compile protos
pip install grpcio-tools==1.62.2 mypy-protobuf==3.6.0 'types-protobuf' --no-cache-dir
mkdir text_generation_server/pb || true
python -m grpc_tools.protoc -I../proto/v3 --python_out=text_generation_server/pb \
--grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb ../proto/v3/generate.proto
find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
touch text_generation_server/pb/__init__.py
install-server: gen-server
pip install pip --upgrade
pip install -r requirements_cuda.txt
pip install -e ".[accelerate, quantize, peft, outlines]"
install: install-cuda
echo "Installed server"
install-cuda: install-server install-flash-attention-v2-cuda install-vllm-cuda install-flash-attention install-fbgemm
pip install -e ".[bnb]"
pip install nvidia-nccl-cu12==2.22.3
install-rocm: install-server install-flash-attention-v2-rocm install-vllm-rocm
run-dev:
SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded
export-requirements:
poetry export -o requirements_cuda.txt --without-hashes
poetry export -o requirements_rocm.txt --without-hashes
poetry export -o requirements_intel.txt --without-hashes
| text-generation-inference/server/Makefile/0 | {
"file_path": "text-generation-inference/server/Makefile",
"repo_id": "text-generation-inference",
"token_count": 610
} | 220 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#define _cuda_buffers_cu
#include "cuda_buffers.cuh"
CudaBuffers* g_buffers[CUDA_MAX_DEVICES] = {NULL};
// __constant__ half2 q4_table[16][256];
// half2 q4_table_host[16][256];
// bool q4_table_init = false;
CudaBuffers::CudaBuffers
(
int _device,
half* _temp_state,
half* _temp_dq
) :
device(_device),
temp_state(_temp_state),
temp_dq(_temp_dq)
{
cudaSetDevice(_device);
cudaStreamCreate(&alt_stream_1);
cudaStreamCreate(&alt_stream_2);
cudaStreamCreate(&alt_stream_3);
cudaEventCreate(&alt_stream_1_done);
cudaEventCreate(&alt_stream_2_done);
cudaEventCreate(&alt_stream_3_done);
}
CudaBuffers::~CudaBuffers()
{
cudaStreamDestroy(alt_stream_1);
cudaStreamDestroy(alt_stream_2);
cudaStreamDestroy(alt_stream_3);
cudaEventDestroy(alt_stream_1_done);
cudaEventDestroy(alt_stream_2_done);
cudaEventDestroy(alt_stream_3_done);
}
CudaBuffers* get_buffers(const int device_index)
{
return g_buffers[device_index];
}
void prepare_buffers_cuda
(
int _device,
half* _temp_state,
half* _temp_dq
)
{
CudaBuffers* buffers = new CudaBuffers
(
_device,
_temp_state,
_temp_dq
);
g_buffers[_device] = buffers;
}
void cleanup_buffers_cuda()
{
for (int i = 0; i < CUDA_MAX_DEVICES; i++)
{
if (!g_buffers[i]) continue;
delete g_buffers[i];
g_buffers[i] = NULL;
}
}
| text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_buffers.cu",
"repo_id": "text-generation-inference",
"token_count": 680
} | 221 |
#include <torch/extension.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include "config.h"
#include "cuda/q_matrix.cuh"
#include "cuda/q_gemm.cuh"
#include "cpp/util.h"
// Some decluttering macros
#define TORCH_CHECK_DTYPE(__x, __dtype) TORCH_CHECK((__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_DTYPE_OPT(__x, __dtype) TORCH_CHECK((__x).device().is_meta() || (__x).dtype() == torch::__dtype, #__x " is incorrect datatype, must be " #__dtype)
#define TORCH_CHECK_SHAPES(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
#define TORCH_CHECK_SHAPES_OPT(__x, __dim_x, __y, __dim_y, __scale_y) TORCH_CHECK((__x).device().is_meta() || (__x).size(__dim_x) == (__y).size(__dim_y) * __scale_y, #__x " and " #__y " have incompatible shapes")
// Quant matrix
uintptr_t make_q_matrix
(
torch::Tensor q_weight,
torch::Tensor q_perm,
torch::Tensor q_invperm,
torch::Tensor q_scale,
torch::Tensor q_scale_max,
torch::Tensor q_groups,
torch::Tensor q_group_map,
torch::Tensor gptq_qzeros,
torch::Tensor gptq_scales,
torch::Tensor gptq_g_idx,
torch::Tensor temp_dq
)
{
TORCH_CHECK_DTYPE(q_weight, kInt);
TORCH_CHECK_DTYPE_OPT(q_perm, kShort);
TORCH_CHECK_DTYPE_OPT(q_invperm, kShort);
TORCH_CHECK_DTYPE_OPT(q_scale, kInt);
TORCH_CHECK_DTYPE_OPT(q_scale_max, kHalf);
TORCH_CHECK_DTYPE_OPT(q_groups, kShort);
TORCH_CHECK_DTYPE_OPT(q_group_map, kShort);
TORCH_CHECK_DTYPE_OPT(gptq_qzeros, kInt);
TORCH_CHECK_DTYPE_OPT(gptq_scales, kHalf);
TORCH_CHECK_DTYPE_OPT(gptq_g_idx, kInt);
TORCH_CHECK_SHAPES(q_perm, 0, q_invperm, 0, 1);
int device = q_weight.device().index();
int width = q_weight.size(1);
int groups;
int height;
if (!q_scale.device().is_meta())
{
TORCH_CHECK_SHAPES(q_weight, 1, q_scale, 1, 8);
TORCH_CHECK_SHAPES(q_scale_max, 0, q_scale, 0, 1);
groups = q_scale.size(0);
height = q_invperm.size(0);
}
else
{
TORCH_CHECK_SHAPES(q_weight, 1, gptq_qzeros, 1, 8);
TORCH_CHECK_SHAPES(q_weight, 1, gptq_scales, 1, 1);
groups = gptq_qzeros.size(0);
height = q_weight.size(0) * 8;
}
TORCH_CHECK(temp_dq.size(0) >= width * height, "Insufficient size of temp_dq buffer")
QMatrix* m = new QMatrix
(
device,
height,
width,
groups,
(uint32_t*) q_weight.data_ptr(),
q_perm.device().is_meta() ? NULL : (uint16_t*) q_perm.data_ptr(),
q_invperm.device().is_meta() ? NULL : (uint16_t*) q_invperm.data_ptr(),
q_scale.device().is_meta() ? NULL : (uint32_t*) q_scale.data_ptr(),
q_scale_max.device().is_meta() ? NULL : (half*) q_scale_max.data_ptr(),
q_groups.device().is_meta() ? NULL : (uint16_t*) q_groups.data_ptr(),
q_group_map.device().is_meta() ? NULL : (uint16_t*) q_group_map.data_ptr(),
gptq_qzeros.device().is_meta() ? NULL : (uint32_t*) gptq_qzeros.data_ptr(),
gptq_scales.device().is_meta() ? NULL : (half*) gptq_scales.data_ptr(),
gptq_g_idx.device().is_meta() ? NULL : (uint32_t*) gptq_g_idx.data_ptr(),
(half*) temp_dq.data_ptr()
);
if (m->failed) throw std::runtime_error("CUDA out of memory");
return reinterpret_cast<uintptr_t> (m);
}
void gemm_half_q_half
(
torch::Tensor a,
uintptr_t b,
torch::Tensor c,
bool force_cuda
)
{
QMatrix* qm = reinterpret_cast<QMatrix*> (b);
TORCH_CHECK_DTYPE(a, kHalf);
TORCH_CHECK_DTYPE(c, kHalf);
TORCH_CHECK_SHAPES(a, 0, c, 0, 1);
TORCH_CHECK(qm->height == a.size(1), "a and b have incompatible shapes")
TORCH_CHECK(qm->width == c.size(1), "b and c have incompatible shapes")
const at::cuda::OptionalCUDAGuard device_guard(device_of(a));
gemm_half_q_half_cuda
(
at::cuda::getCurrentCUDABlasHandle(),
(const half*) a.data_ptr(),
qm,
(half*) c.data_ptr(),
c.size(0), // m
c.size(1), // n
a.size(1), // k
true,
NULL,
force_cuda
);
}
// Bindings
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
m.def("make_q_matrix", &make_q_matrix, "make_q_matrix");
m.def("gemm_half_q_half", &gemm_half_q_half, "gemm_half_q_half");
}
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/ext.cpp",
"repo_id": "text-generation-inference",
"token_count": 2184
} | 222 |
import torch
from text_generation_server.layers import (
TensorParallelEmbedding,
)
class ProcessGroup:
def __init__(self, rank: int, world_size: int):
self._rank = rank
self.world_size = world_size
def size(self) -> int:
return self.world_size
def rank(self) -> int:
return self._rank
class Weights:
def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int):
self.weight = (
torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim)
)
self.process_group = ProcessGroup(rank, world_size)
def get_partial_sharded(self, name: str, dim: int):
assert dim == 0
rank = self.process_group.rank()
world_size = self.process_group.size()
size = self.weight.shape[dim]
block_size = (size + world_size - 1) // world_size
start = rank * block_size
stop = (rank + 1) * block_size
return self.weight[start:stop]
def get_shape(self, name: str):
return self.weight.shape
def test_weight_hub_files_offline_error():
vocab_size = 17
weights = Weights(
rank=0,
world_size=1,
vocab_size=vocab_size,
hidden_dim=256,
)
embeddings = TensorParallelEmbedding("", weights)
input_ids = torch.arange(vocab_size)
output = embeddings.forward(input_ids)
assert embeddings.min_id == 0
assert embeddings.max_id == 17
torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256))
weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256)
weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256)
embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False)
assert embeddings_0_2.min_id == 0
assert embeddings_0_2.max_id == 9
torch.testing.assert_close(
embeddings_0_2.weight,
torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0)
.view(10, 256)
.float(),
)
embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False)
assert embeddings_1_2.min_id == 9
assert embeddings_1_2.max_id == 17
torch.testing.assert_close(
embeddings_1_2.weight,
torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0)
.view(9, 256)
.float(),
)
output_tp_0 = embeddings_0_2.forward(input_ids)
output_tp_1 = embeddings_1_2.forward(input_ids)
torch.testing.assert_close(output, output_tp_0 + output_tp_1)
| text-generation-inference/server/tests/utils/test_layers.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_layers.py",
"repo_id": "text-generation-inference",
"token_count": 1146
} | 223 |
#!/usr/bin/env python
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention v2 algorithm from Tri Dao
(https://tridao.me/publications/flash2/flash2.pdf)
Credits: OpenAI kernel team, AMD ML Frameworks Triton team
Features supported:
1) Fwd with causal masking
2) Any sequence lengths without padding (currently fwd kernel only)
3) Support for different sequence lengths for q and k
4) Nested tensor API currently does not support dropout or bias.
Not currently supported:
1) Non power of two head dims
"""
import torch
import triton
import triton.language as tl
torch_dtype: tl.constexpr = torch.float16
@triton.jit
def cdiv_fn(x, y):
return (x + y - 1) // y
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def dropout_offsets(philox_seed, philox_offset, dropout_p, m, n, stride):
ms = tl.arange(0, m)
ns = tl.arange(0, n)
return philox_offset + ms[:, None] * stride + ns[None, :]
@triton.jit
def dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride):
rng_offsets = dropout_offsets(
philox_seed, philox_offset, dropout_p, m, n, stride
).to(tl.uint32)
# TODO: use tl.randint for better performance
return tl.rand(philox_seed, rng_offsets)
@triton.jit
def dropout_mask(philox_seed, philox_offset, dropout_p, m, n, stride):
rng_output = dropout_rng(philox_seed, philox_offset, dropout_p, m, n, stride)
rng_keep = rng_output > dropout_p
return rng_keep
@triton.jit
def load_fn(block_ptr, first, second, pad):
if first and second:
tensor = tl.load(block_ptr, boundary_check=(0, 1), padding_option=pad)
elif first:
tensor = tl.load(block_ptr, boundary_check=(0,), padding_option=pad)
elif second:
tensor = tl.load(block_ptr, boundary_check=(1,), padding_option=pad)
else:
tensor = tl.load(block_ptr)
return tensor
@triton.jit
def _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
actual_seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
OFFS_M: tl.constexpr,
OFFS_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
MASK_STEPS: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
PADDED_HEAD: tl.constexpr,
):
# loop over k, v, and update accumulator
for start_n in range(block_min, block_max, BLOCK_N):
# For padded blocks, we will overrun the tensor size if
# we load all BLOCK_N. For others, the blocks are all within range.
k = load_fn(
K_block_ptr,
PADDED_HEAD,
MASK_STEPS and (n_extra_tokens != 0),
"zero",
)
if PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
# We start from end of seqlen_k so only the first iteration would need
# to be checked for padding if it is not a multiple of block_n
# TODO: This can be optimized to only be true for the padded block.
if MASK_STEPS: # noqa: SIM102
# If this is the last block / iteration, we want to
# mask if the sequence length is not a multiple of block size
# a solution is to always do BLOCK_M // BLOCK_N + 1 steps
# if not is_modulo_mn. last step might get wasted but that is okay.
# check if this masking works for that case.
if (start_n + BLOCK_N == block_max) and (n_extra_tokens != 0):
boundary_m = tl.full([BLOCK_M], actual_seqlen_k, dtype=tl.int32)
size_n = start_n + OFFS_N[None, :]
mask = size_n < boundary_m[:, None]
qk = tl.where(mask, qk, float("-inf"))
if IS_CAUSAL:
causal_boundary = start_n + offs_n_causal
causal_mask = OFFS_M[:, None] >= causal_boundary[None, :]
qk = tl.where(causal_mask, qk, float("-inf"))
# -- compute qk ----
qk += tl.dot(q, k)
if bias_ptr is not None:
bias = load_fn(
bias_ptr, False, MASK_STEPS and (n_extra_tokens != 0), "zero"
)
# While bias is added after multiplying qk with sm_scale, our
# optimization to use 2^x instead of e^x results in an additional
# scale factor of log2(e) which we must also multiply the bias with.
qk += bias * 1.44269504089
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk = qk - m_ij[:, None]
p = tl.math.exp2(qk)
# CAVEAT: Must update l_ij before applying dropout
l_ij = tl.sum(p, 1)
if ENABLE_DROPOUT:
philox_offset = (
batch_philox_offset
+ start_m * BLOCK_M * actual_seqlen_k
+ start_n
- BLOCK_N
)
keep = dropout_mask(
philox_seed,
philox_offset,
dropout_p,
BLOCK_M,
BLOCK_N,
actual_seqlen_k,
)
if RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
tl.where(keep, p, -p).to(encoded_softmax_block_ptr.type.element_ty),
)
p = tl.where(keep, p, 0.0)
elif RETURN_ENCODED_SOFTMAX:
tl.store(
encoded_softmax_block_ptr,
p.to(encoded_softmax_block_ptr.type.element_ty),
)
# -- update output accumulator --
alpha = tl.math.exp2(m_i - m_ij)
acc = acc * alpha[:, None]
if not PRE_LOAD_V:
v = load_fn(
V_block_ptr,
MASK_STEPS and (n_extra_tokens != 0),
PADDED_HEAD,
"zero",
)
# -- update m_i and l_i
l_i = l_i * alpha + l_ij
# update m_i and l_i
m_i = m_ij
acc += tl.dot(p.to(V_block_ptr.type.element_ty), v)
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, BLOCK_N)
)
return acc, l_i, m_i
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 64,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 256,
"BLOCK_N": 128,
"waves_per_eu": 2,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": True,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 3,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 64,
"BLOCK_N": 64,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
triton.Config(
{
"BLOCK_M": 32,
"BLOCK_N": 32,
"waves_per_eu": 4,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=8,
),
# TODO: This config fails with head_size not pow2 with data mismatches.
# triton.Config({'BLOCK_M': 32, 'BLOCK_N': 16, 'waves_per_eu': 1,
# 'PRE_LOAD_V': False}, num_stages=1, num_warps=4),
triton.Config(
{
"BLOCK_M": 16,
"BLOCK_N": 16,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
triton.Config(
{
"BLOCK_M": 128,
"BLOCK_N": 64,
"waves_per_eu": 1,
"PRE_LOAD_V": False,
},
num_stages=1,
num_warps=4,
),
],
key=["IS_CAUSAL", "dropout_p", "BLOCK_DMODEL"],
)
@triton.jit
def attn_fwd(
Q,
K,
V,
bias,
sm_scale,
L,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
stride_bz,
stride_bh,
stride_bm,
stride_bn,
cu_seqlens_q,
cu_seqlens_k,
dropout_p,
philox_seed,
philox_offset_base,
encoded_softmax,
HQ: tl.constexpr,
HK: tl.constexpr,
ACTUAL_BLOCK_DMODEL: tl.constexpr,
MAX_SEQLENS_Q: tl.constexpr,
MAX_SEQLENS_K: tl.constexpr,
VARLEN: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
PRE_LOAD_V: tl.constexpr,
BIAS_TYPE: tl.constexpr,
ENABLE_DROPOUT: tl.constexpr,
RETURN_ENCODED_SOFTMAX: tl.constexpr,
):
start_m = tl.program_id(0)
off_h_q = tl.program_id(1)
off_z = tl.program_id(2)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
if VARLEN:
cu_seqlens_q_start = tl.load(cu_seqlens_q + off_z)
cu_seqlens_q_end = tl.load(cu_seqlens_q + off_z + 1)
seqlen_q = cu_seqlens_q_end - cu_seqlens_q_start
# We have a one-size-fits-all grid in id(0). Some seqlens might be too
# small for all start_m so for those we return early.
if start_m * BLOCK_M > seqlen_q:
return
cu_seqlens_k_start = tl.load(cu_seqlens_k + off_z)
cu_seqlens_k_end = tl.load(cu_seqlens_k + off_z + 1)
seqlen_k = cu_seqlens_k_end - cu_seqlens_k_start
else:
cu_seqlens_q_start = 0
cu_seqlens_k_start = 0
seqlen_q = MAX_SEQLENS_Q
seqlen_k = MAX_SEQLENS_K
# Now we compute whether we need to exit early due to causal masking.
# This is because for seqlen_q > seqlen_k, M rows of the attn scores
# are completely masked, resulting in 0s written to the output, and
# inf written to LSE. We don't need to do any GEMMs in this case.
# This block of code determines what N is, and if this WG is operating
# on those M rows.
n_blocks = cdiv_fn(seqlen_k, BLOCK_N)
if IS_CAUSAL:
# If seqlen_q == seqlen_k, the attn scores are a square matrix.
# If seqlen_q != seqlen_k, attn scores are rectangular which means
# the causal mask boundary is bottom right aligned, and ends at either
# the top edge (seqlen_q < seqlen_k) or left edge.
# This captures the decrease in n_blocks if we have a rectangular attn
# matrix
n_blocks_seqlen = cdiv_fn(
(start_m + 1) * BLOCK_M + seqlen_k - seqlen_q, BLOCK_N
)
# This is what adjusts the block_max for the current WG, only
# if IS_CAUSAL. Otherwise we want to always iterate through all n_blocks
n_blocks = min(n_blocks, n_blocks_seqlen)
# If we have no blocks after adjusting for seqlen deltas, this WG is
# part of the blocks that are all 0. We exit early.
if n_blocks <= 0:
o_offset = (
off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
)
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=Out.type.element_ty)
# We still need to write 0s to the result
# tl.store(O_block_ptr,
# acc.to(Out.type.element_ty), boundary_check=(0,1))
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q
# + offs_m
# We store inf to LSE, not -inf because in the bwd pass,
# we subtract this
# from qk which makes it -inf, such that exp(qk - inf) = 0
# for these masked blocks.
# l = tl.full([BLOCK_M], value=float("inf"), dtype=tl.float32)
# tl.store(l_ptrs, l)
# TODO: Should dropout and return encoded softmax be handled here?
return
# If MQA / GQA, set the K and V head offsets appropriately.
GROUP_SIZE: tl.constexpr = HQ // HK
if GROUP_SIZE != 1:
off_h_k = off_h_q // GROUP_SIZE
else:
off_h_k = off_h_q
n_extra_tokens = 0
if seqlen_k < BLOCK_N:
n_extra_tokens = BLOCK_N - seqlen_k
elif seqlen_k % BLOCK_N:
n_extra_tokens = seqlen_k % BLOCK_N
PADDED_HEAD: tl.constexpr = ACTUAL_BLOCK_DMODEL != BLOCK_DMODEL
# Compute pointers for all the tensors used in this kernel.
q_offset = off_z * stride_qz + off_h_q * stride_qh + cu_seqlens_q_start * stride_qm
Q_block_ptr = tl.make_block_ptr(
base=Q + q_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
k_offset = off_z * stride_kz + off_h_k * stride_kh + cu_seqlens_k_start * stride_kn
K_block_ptr = tl.make_block_ptr(
base=K + k_offset,
shape=(ACTUAL_BLOCK_DMODEL, seqlen_k),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1),
)
v_offset = off_z * stride_vz + off_h_k * stride_vh + cu_seqlens_k_start * stride_vk
V_block_ptr = tl.make_block_ptr(
base=V + v_offset,
shape=(seqlen_k, ACTUAL_BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0),
)
if BIAS_TYPE != 0:
bias_ptr = tl.make_block_ptr(
base=bias + off_h_q * stride_bh,
shape=(seqlen_q, seqlen_k),
strides=(stride_bm, stride_bn),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
bias_ptr = None
if ENABLE_DROPOUT:
batch_philox_offset = (
philox_offset_base + (off_z * HQ + off_h_q) * seqlen_q * seqlen_k
)
else:
batch_philox_offset = 0
# We can ask to return the dropout mask without actually doing any dropout.
# In this case, we return an invalid pointer so indicate the mask is not i
# valid.
# TODO: Fix encoded softmax. It currently uses just h_q in the base offset.
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.make_block_ptr(
base=encoded_softmax + off_h_q * seqlen_q * seqlen_k,
shape=(seqlen_q, seqlen_k),
strides=(seqlen_k, 1),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_N),
order=(1, 0),
)
else:
encoded_softmax_block_ptr = 0
# initialize pointer to m and l
m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32)
l_i = tl.full([BLOCK_M], 1.0, dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use 2^x in the loop as we do not
# have native e^x support in HW.
qk_scale = sm_scale * 1.44269504089
# Q is loaded once at the beginning and shared by all N blocks.
q = load_fn(Q_block_ptr, True, PADDED_HEAD, "zero")
q = (q * qk_scale).to(Q_block_ptr.type.element_ty)
# Here we compute how many full and masked blocks we have.
padded_block_k = n_extra_tokens != 0
is_modulo_mn = not padded_block_k and (seqlen_q % BLOCK_M == 0)
if IS_CAUSAL:
# There are always at least BLOCK_M // BLOCK_N masked blocks.
# Additionally there might be one more due to dissimilar seqlens.
masked_blocks = BLOCK_M // BLOCK_N + (not is_modulo_mn)
else:
# Padding on Q does not need to be masked in the FA loop.
masked_blocks = padded_block_k
# if IS_CAUSAL, not is_modulo_mn does not always result in an additional
# block. In this case we might exceed n_blocks so pick the min.
masked_blocks = min(masked_blocks, n_blocks)
n_full_blocks = n_blocks - masked_blocks
block_min = 0
block_max = n_blocks * BLOCK_N
# Compute for full blocks. Here we set causal to false regardless of its
# value because there is no masking. Similarly we do not need padding.
if n_full_blocks > 0:
block_max = (n_blocks - masked_blocks) * BLOCK_N
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
# _, _, offs_n_causal, masked_blocks, n_extra_tokens, _
block_min,
block_max,
0,
0,
0,
bias_ptr,
# IS_CAUSAL, ....
False,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
False,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
block_min = block_max
block_max = n_blocks * BLOCK_N
tl.debug_barrier()
# Remaining blocks, if any, are full / not masked.
if masked_blocks > 0:
offs_n_causal = offs_n + (seqlen_q - seqlen_k) if IS_CAUSAL else 0
K_block_ptr = tl.advance(K_block_ptr, (0, n_full_blocks * BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (n_full_blocks * BLOCK_N, 0))
if bias_ptr is not None:
bias_ptr = tl.advance(bias_ptr, (0, n_full_blocks * BLOCK_N))
if RETURN_ENCODED_SOFTMAX:
encoded_softmax_block_ptr = tl.advance(
encoded_softmax_block_ptr, (0, n_full_blocks)
)
acc, l_i, m_i = _attn_fwd_inner(
acc,
l_i,
m_i,
q,
K_block_ptr,
V_block_ptr,
start_m,
seqlen_k,
dropout_p,
philox_seed,
batch_philox_offset,
encoded_softmax_block_ptr,
block_min,
block_max,
offs_n_causal,
masked_blocks,
n_extra_tokens,
bias_ptr,
IS_CAUSAL,
BLOCK_M,
BLOCK_DMODEL,
BLOCK_N,
offs_m,
offs_n,
# _, MASK_STEPS, ...
PRE_LOAD_V,
True,
ENABLE_DROPOUT,
RETURN_ENCODED_SOFTMAX,
PADDED_HEAD,
)
# epilogue
acc = acc / l_i[:, None]
if ENABLE_DROPOUT:
acc = acc / (1 - dropout_p)
# If seqlen_q > seqlen_k but the delta is not a multiple of BLOCK_M,
# then we have one block with a row of all NaNs which come from computing
# softmax over a row of all -infs (-inf - inf = NaN). We check for that here
# and store 0s where there are NaNs as these rows should've been zeroed out.
end_m_idx = (start_m + 1) * BLOCK_M
start_m_idx = start_m * BLOCK_M
causal_start_idx = seqlen_q - seqlen_k
acc = acc.to(Out.type.element_ty)
if IS_CAUSAL: # noqa: SIM102
if causal_start_idx > start_m_idx and causal_start_idx < end_m_idx:
out_mask_boundary = tl.full(
(BLOCK_DMODEL,), causal_start_idx, dtype=tl.int32
)
mask_m_offsets = start_m_idx + tl.arange(0, BLOCK_M)
out_ptrs_mask = mask_m_offsets[:, None] >= out_mask_boundary[None, :]
z = 0.0
acc = tl.where(out_ptrs_mask, acc, z.to(acc.type.element_ty))
# write back LSE
# l_ptrs = L + off_z * hq * MAX_SEQLENS_Q + off_h_q * MAX_SEQLENS_Q + offs_m
# If seqlen_q not multiple of BLOCK_M, we need to mask out the last
# few rows. This is only true for the last M block. For others,
# overflow_size will be -ve
# overflow_size = end_m_idx - seqlen_q
# if overflow_size > 0:
# boundary = tl.full((BLOCK_M,), BLOCK_M - overflow_size, dtype=tl.int32)
# # This is a > check because mask being 0 blocks the store.
# l_ptrs_mask = boundary > tl.arange(0, BLOCK_M)
# tl.store(l_ptrs, m_i + tl.math.log2(l_i), mask=l_ptrs_mask)
# else:
# tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
o_offset = off_z * stride_oz + cu_seqlens_q_start * stride_om + off_h_q * stride_oh
O_block_ptr = tl.make_block_ptr(
base=Out + o_offset,
shape=(seqlen_q, ACTUAL_BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
# Need boundary check on this to make sure the padding from the
# Q and KV tensors in both dims are not part of what we store back.
# TODO: Do the boundary check optionally.
tl.store(O_block_ptr, acc, boundary_check=(0, 1))
def check_args(
q,
k,
v,
o,
varlen=True,
max_seqlens=None,
cu_seqlens_q=None,
cu_seqlens_k=None,
):
assert q.dim() == k.dim() and q.dim() == v.dim()
if varlen:
assert q.dim() == 3
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
assert cu_seqlens_q is not None
assert cu_seqlens_k is not None
assert len(cu_seqlens_q) == len(cu_seqlens_k)
else:
assert q.dim() == 4
batch, nheads_q, seqlen_q, head_size = q.shape
_, nheads_k, seqlen_k, _ = k.shape
assert max_seqlens > 0
assert k.shape == v.shape
assert q.shape[-1] == k.shape[-1] and q.shape[-1] == v.shape[-1]
# TODO: Change assert if we support qkl f8 and v f16
assert q.dtype == k.dtype and q.dtype == v.dtype
# TODO: Fix assert to check head size <=256 once supported
assert head_size <= 128
assert o.shape == q.shape
assert (nheads_q % nheads_k) == 0
class _attention(torch.autograd.Function):
@staticmethod
def forward(
ctx,
q,
k,
v,
o,
cu_seqlens_q,
cu_seqlens_k,
max_seqlens_q,
max_seqlens_k,
causal=False,
sm_scale=1.0,
bias=None,
):
if o is None:
o = torch.empty_like(q, dtype=v.dtype)
check_args(
q,
k,
v,
o,
varlen=True,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
)
if True: # varlen
total_q, nheads_q, head_size = q.shape
total_k, nheads_k, _ = k.shape
batch = len(cu_seqlens_q) - 1
q_strides = (0, q.stride(1), q.stride(0), q.stride(2))
k_strides = (0, k.stride(1), k.stride(0), k.stride(2))
v_strides = (0, v.stride(1), v.stride(0), v.stride(2))
o_strides = (0, o.stride(1), o.stride(0), o.stride(2))
else:
batch, seqlen_q, nheads_q, head_size = q.shape
_, seqlen_k, nheads_k, _ = k.shape
q_strides = (q.stride(0), q.stride(2), q.stride(1), q.stride(3))
k_strides = (k.stride(0), k.stride(2), k.stride(1), k.stride(3))
v_strides = (v.stride(0), v.stride(2), v.stride(1), v.stride(3))
o_strides = (o.stride(0), o.stride(2), o.stride(1), o.stride(3))
# Get closest power of 2 over or equal to 32.
padded_d_model = 1 << (head_size - 1).bit_length()
padded_d_model = max(padded_d_model, 16)
def grid(META):
return triton.cdiv(max_seqlens_q, META["BLOCK_M"]), nheads_q, batch
encoded_softmax = None
# Seed the RNG so we get reproducible results for testing.
philox_seed = 0x1BF52
philox_offset = 0x1D4B42
if bias is not None:
bias_strides = (
bias.stride(0),
bias.stride(1),
bias.stride(2),
bias.stride(3),
)
else:
bias_strides = (0, 0, 0, 0)
attn_fwd[grid](
q,
k,
v,
bias,
sm_scale,
None,
o,
*q_strides,
*k_strides,
*v_strides,
*o_strides,
*bias_strides,
cu_seqlens_q,
cu_seqlens_k,
dropout_p=0.0,
philox_seed=philox_seed,
philox_offset_base=philox_offset,
encoded_softmax=encoded_softmax,
HQ=nheads_q,
HK=nheads_k,
ACTUAL_BLOCK_DMODEL=head_size,
MAX_SEQLENS_Q=max_seqlens_q,
MAX_SEQLENS_K=max_seqlens_k,
IS_CAUSAL=causal,
VARLEN=True,
BLOCK_DMODEL=padded_d_model,
BIAS_TYPE=0 if bias is None else 1,
ENABLE_DROPOUT=False,
RETURN_ENCODED_SOFTMAX=False,
)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = head_size
ctx.causal = causal
ctx.dropout_p = 0.0
ctx.philox_seed = philox_seed
ctx.philox_offset = philox_offset
ctx.encoded_softmax = encoded_softmax
ctx.return_encoded_softmax = False
return o, encoded_softmax
triton_attention = _attention.apply
| text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/flash_attn_triton.py",
"repo_id": "text-generation-inference",
"token_count": 14692
} | 224 |
import time
import torch.nn as nn
import math
import json
import os
import torch
import transformers
from texttable import Texttable
from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer
from huggingface_hub import HfApi
from accelerate import init_empty_weights
from text_generation_server.utils import initialize_torch_distributed, Weights
from text_generation_server.utils.hub import weight_files
from text_generation_server.layers.gptq.quant_linear import QuantLinear
from loguru import logger
from typing import Optional
from text_generation_server.layers.gptq.utils import torch_snr_error
from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight
DEV = torch.device("cuda:0")
class Quantizer(nn.Module):
def __init__(self, shape=1):
super(Quantizer, self).__init__()
self.register_buffer("maxq", torch.tensor(0))
self.register_buffer("scale", torch.zeros(shape))
self.register_buffer("zero", torch.zeros(shape))
def configure(
self,
bits,
perchannel=False,
sym=True,
mse=False,
norm=2.4,
grid=100,
maxshrink=0.8,
trits=False,
):
self.maxq = torch.tensor(2**bits - 1)
self.perchannel = perchannel
self.sym = sym
self.mse = mse
self.norm = norm
self.grid = grid
self.maxshrink = maxshrink
if trits:
self.maxq = torch.tensor(-1)
self.scale = torch.zeros_like(self.scale)
def _quantize(self, x, scale, zero, maxq):
if maxq < 0:
return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero
q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
return scale * (q - zero)
def find_params(self, x, weight=False):
dev = x.device
self.maxq = self.maxq.to(dev)
shape = x.shape
if self.perchannel:
if weight:
x = x.flatten(1)
else:
if len(shape) == 4:
x = x.permute([1, 0, 2, 3])
x = x.flatten(1)
if len(shape) == 3:
x = x.reshape((-1, shape[-1])).t()
if len(shape) == 2:
x = x.t()
else:
x = x.flatten().unsqueeze(0)
tmp = torch.zeros(x.shape[0], device=dev)
xmin = torch.minimum(x.min(1)[0], tmp)
xmax = torch.maximum(x.max(1)[0], tmp)
if self.sym:
xmax = torch.maximum(torch.abs(xmin), xmax)
tmp = xmin < 0
if torch.any(tmp):
xmin[tmp] = -xmax[tmp]
tmp = (xmin == 0) & (xmax == 0)
xmin[tmp] = -1
xmax[tmp] = +1
if self.maxq < 0:
self.scale = xmax
self.zero = xmin
else:
self.scale = (xmax - xmin) / self.maxq
if self.sym:
self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)
else:
self.zero = torch.round(-xmin / self.scale)
if self.mse:
best = torch.full([x.shape[0]], float("inf"), device=dev)
for i in range(int(self.maxshrink * self.grid)):
p = 1 - i / self.grid
xmin1 = p * xmin
xmax1 = p * xmax
scale1 = (xmax1 - xmin1) / self.maxq
zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero
q = self._quantize(
x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq
)
q -= x
q.abs_()
q.pow_(self.norm)
err = torch.sum(q, 1)
tmp = err < best
if torch.any(tmp):
best[tmp] = err[tmp]
self.scale[tmp] = scale1[tmp]
self.zero[tmp] = zero1[tmp]
if not self.perchannel:
if weight:
tmp = shape[0]
else:
tmp = shape[1] if len(shape) != 3 else shape[2]
self.scale = self.scale.repeat(tmp)
self.zero = self.zero.repeat(tmp)
if weight:
shape = [-1] + [1] * (len(shape) - 1)
self.scale = self.scale.reshape(shape)
self.zero = self.zero.reshape(shape)
return
if len(shape) == 4:
self.scale = self.scale.reshape((1, -1, 1, 1))
self.zero = self.zero.reshape((1, -1, 1, 1))
if len(shape) == 3:
self.scale = self.scale.reshape((1, 1, -1))
self.zero = self.zero.reshape((1, 1, -1))
if len(shape) == 2:
self.scale = self.scale.unsqueeze(0)
self.zero = self.zero.unsqueeze(0)
def quantize(self, x):
if self.ready():
return self._quantize(x, self.scale, self.zero, self.maxq)
return x
def enabled(self):
return self.maxq > 0
def ready(self):
return torch.all(self.scale != 0)
class GPTQ:
def __init__(self, layer, observe=False):
self.layer = layer
self.dev = self.layer.weight.device
W = layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
self.rows = W.shape[0]
self.columns = W.shape[1]
self.H = torch.zeros((self.columns, self.columns), device=self.dev)
self.nsamples = 0
self.quantizer = Quantizer()
self.observe = observe
def add_batch(self, inp, out):
# Hessian H = 2 X XT + λ I
if self.observe:
self.inp1 = inp
self.out1 = out
else:
self.inp1 = None
self.out1 = None
if len(inp.shape) == 2:
inp = inp.unsqueeze(0)
tmp = inp.shape[0]
if isinstance(self.layer, nn.Linear) or isinstance(
self.layer, transformers.Conv1D
):
if len(inp.shape) == 3:
inp = inp.reshape((-1, inp.shape[-1]))
inp = inp.t()
if isinstance(self.layer, nn.Conv2d):
unfold = nn.Unfold(
self.layer.kernel_size,
dilation=self.layer.dilation,
padding=self.layer.padding,
stride=self.layer.stride,
)
inp = unfold(inp)
inp = inp.permute([1, 0, 2])
inp = inp.flatten(1)
self.H *= self.nsamples / (self.nsamples + tmp)
self.nsamples += tmp
# inp = inp.float()
inp = math.sqrt(2 / self.nsamples) * inp.float()
# self.H += 2 / self.nsamples * inp.matmul(inp.t())
self.H += inp.matmul(inp.t())
def print_loss(self, name, q_weight, weight_error, timecost):
table = Texttable()
length = 28
name = (
(name + " " * (length - len(name)))
if len(name) <= length
else name[:length]
)
table.header(["name", "weight_error", "fp_inp_SNR", "q_inp_SNR", "time"])
# assign weight
self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to(
self.layer.weight.data.dtype
)
if self.inp1 is not None:
# quantize input to int8
quantizer = Quantizer()
quantizer.configure(8, perchannel=False, sym=True, mse=False)
quantizer.find_params(self.inp1)
q_in = quantizer.quantize(self.inp1).type(torch.float16)
q_out = self.layer(q_in)
# get kinds of SNR
q_SNR = torch_snr_error(q_out, self.out1).item()
fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item()
else:
q_SNR = "-"
fp_SNR = "-"
table.add_row([name, weight_error, fp_SNR, q_SNR, timecost])
print(table.draw().split("\n")[-2])
def fasterquant(
self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name=""
):
self.layer.to(self.dev)
W = self.layer.weight.data.clone()
if isinstance(self.layer, nn.Conv2d):
W = W.flatten(1)
if isinstance(self.layer, transformers.Conv1D):
W = W.t()
W = W.float()
tick = time.time()
if not self.quantizer.ready():
self.quantizer.find_params(W, weight=True)
H = self.H
if not self.observe:
del self.H
dead = torch.diag(H) == 0
H[dead, dead] = 1
W[:, dead] = 0
if act_order:
perm = torch.argsort(torch.diag(H), descending=True)
W = W[:, perm]
H = H[perm][:, perm]
Losses = torch.zeros_like(W)
Q = torch.zeros_like(W)
damp = percdamp * torch.mean(torch.diag(H))
diag = torch.arange(self.columns, device=self.dev)
H[diag, diag] += damp
H = torch.linalg.cholesky(H)
H = torch.cholesky_inverse(H)
try:
H = torch.linalg.cholesky(H, upper=True)
except Exception:
# Addition because Falcon fails on h_to_4h
H = torch.linalg.cholesky(
H + 1e-5 * torch.eye(H.shape[0]).to(H.device), upper=True
)
Hinv = H
g_idx = []
scale = []
zero = []
now_idx = 1
for i1 in range(0, self.columns, blocksize):
i2 = min(i1 + blocksize, self.columns)
count = i2 - i1
W1 = W[:, i1:i2].clone()
Q1 = torch.zeros_like(W1)
Err1 = torch.zeros_like(W1)
Losses1 = torch.zeros_like(W1)
Hinv1 = Hinv[i1:i2, i1:i2]
for i in range(count):
w = W1[:, i]
d = Hinv1[i, i]
if groupsize != -1:
if (i1 + i) % groupsize == 0:
self.quantizer.find_params(
W[:, (i1 + i) : (i1 + i + groupsize)], weight=True
)
if ((i1 + i) // groupsize) - now_idx == -1:
scale.append(self.quantizer.scale)
zero.append(self.quantizer.zero)
now_idx += 1
q = self.quantizer.quantize(w.unsqueeze(1)).flatten()
Q1[:, i] = q
Losses1[:, i] = (w - q) ** 2 / d**2
err1 = (w - q) / d
W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))
Err1[:, i] = err1
Q[:, i1:i2] = Q1
Losses[:, i1:i2] = Losses1 / 2
W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])
torch.cuda.synchronize()
error = torch.sum(Losses).item()
groupsize = groupsize if groupsize != -1 else self.columns
g_idx = [i // groupsize for i in range(self.columns)]
g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device)
if act_order:
invperm = torch.argsort(perm)
Q = Q[:, invperm]
g_idx = g_idx[invperm]
if isinstance(self.layer, transformers.Conv1D):
Q = Q.t()
self.print_loss(
name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick)
)
if scale == []:
scale.append(self.quantizer.scale)
zero.append(self.quantizer.zero)
scale = torch.cat(scale, dim=1)
zero = torch.cat(zero, dim=1)
return scale, zero, g_idx, error
def free(self):
self.inp1 = None
self.out1 = None
self.H = None
self.Losses = None
self.Trace = None
torch.cuda.empty_cache()
def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
split="train",
use_auth_token=False,
)
valdata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
split="validation",
use_auth_token=False,
)
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]["text"], return_tensors="pt")
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
import random
random.seed(0)
valenc = []
for _ in range(256):
while True:
i = random.randint(0, len(valdata) - 1)
tmp = tokenizer(valdata[i]["text"], return_tensors="pt")
if tmp.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
valenc.append(tmp.input_ids[:, i:j])
valenc = torch.hstack(valenc)
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset("ptb_text_only", "penn_treebank", split="train")
testdata = load_dataset("ptb_text_only", "penn_treebank", split="test")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt")
testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt")
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
return trainloader, testenc
def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code):
from datasets import load_dataset
traindata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"train": "en/c4-train.00000-of-01024.json.gz"},
split="train",
)
valdata = load_dataset(
"allenai/c4",
"allenai--c4",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
split="validation",
)
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False, trust_remote_code=trust_remote_code
)
except Exception:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=True, trust_remote_code=trust_remote_code
)
import random
random.seed(seed)
trainloader = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(traindata) - 1)
trainenc = tokenizer(traindata[i]["text"], return_tensors="pt")
if trainenc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = trainenc.input_ids[:, i:j]
tar = inp.clone()
tar[:, :-1] = -100
trainloader.append((inp, tar))
valenc = tokenizer(" ".join(valdata[:1100]["text"]), return_tensors="pt")
valenc = valenc.input_ids[:, : (256 * seqlen)]
class TokenizerWrapper:
def __init__(self, input_ids):
self.input_ids = input_ids
valenc = TokenizerWrapper(valenc)
return trainloader, valenc
def get_loaders(
name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False
):
if "wikitext2" in name:
return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code)
if "ptb" in name:
if "new" in name:
return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code)
return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code)
if "c4" in name:
if "new" in name:
return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code)
return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code)
def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""):
# Skip last lm_head linear
# Need isintance Falcon is inheriting Linear.
if isinstance(module, layers) and "lm_head" not in name:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(
find_layers(
child, layers=layers, name=name + "." + name1 if name != "" else name1
)
)
return res
@torch.no_grad()
def sequential(
model,
dataloader,
dev,
nsamples,
bits,
groupsize,
*,
hooks,
percdamp=0.01,
sym: bool = False,
act_order: bool = False,
):
print("Starting ...")
use_cache = model.config.use_cache
model.config.use_cache = False
try:
layers = model.model.layers
prefix = "model.layers"
except Exception:
layers = model.transformer.h
prefix = "transformer.h"
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros(
(nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev
)
cache = {"i": 0}
extra = {}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache["i"]] = inp
cache["i"] += 1
extra.update(kwargs.copy())
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].cuda())
except ValueError:
pass
layers[0] = layers[0].module
# layers[0] = layers[0].cpu()
# model.model.embed_tokens = model.model.embed_tokens.cpu()
# model.model.norm = model.model.norm.cpu()
torch.cuda.empty_cache()
for hook in hooks:
hook.remove()
outs = torch.zeros_like(inps)
extra = {
k: v.to(dev) if isinstance(v, torch.Tensor) else v for k, v in extra.items()
}
print("Ready.")
quantizers = {}
for i in range(len(layers)):
print(f"Quantizing layer {i+1}/{len(layers)}..")
print("+------------------+--------------+------------+-----------+-------+")
print("| name | weight_error | fp_inp_SNR | q_inp_SNR | time |")
print("+==================+==============+============+===========+=======+")
layer = layers[i]
layer.load()
full = find_layers(layer)
sequential = [list(full.keys())]
for names in sequential:
subset = {n: full[n] for n in names}
gptq = {}
for name in subset:
gptq[name] = GPTQ(subset[name])
gptq[name].quantizer.configure(
bits, perchannel=True, sym=sym, mse=False
)
pass
def add_batch(name):
nonlocal gptq
def tmp(_, inp, out):
gptq[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in subset:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **extra)[0]
for h in handles:
h.remove()
for name in subset:
scale, zero, g_idx, error = gptq[name].fasterquant(
percdamp=percdamp,
groupsize=groupsize,
act_order=act_order,
name=name,
)
quantizers[f"{prefix}.{i}.{name}"] = (
gptq[name].quantizer.cpu(),
scale.cpu(),
zero.cpu(),
g_idx.cpu(),
bits,
groupsize,
)
gptq[name].free()
for j in range(nsamples):
outs[j] = layer(inps[j].unsqueeze(0), **extra)[0]
layer.unload()
del layer
del gptq
torch.cuda.empty_cache()
inps, outs = outs, inps
print("+------------------+--------------+------------+-----------+-------+")
print("\n")
model.config.use_cache = use_cache
return quantizers
def make_quant_linear(module, names, bits, groupsize, name=""):
if isinstance(module, QuantLinear):
return
for attr in dir(module):
tmp = getattr(module, attr)
name1 = name + "." + attr if name != "" else attr
if name1 in names:
delattr(module, attr)
setattr(
module,
attr,
QuantLinear.new(
bits,
groupsize,
tmp.in_features,
tmp.out_features,
tmp.bias is not None,
),
)
for name1, child in module.named_children():
make_quant_linear(
child, names, bits, groupsize, name + "." + name1 if name != "" else name1
)
# TODO: perform packing on GPU
def pack(model, quantizers, bits, groupsize):
layers = find_layers(model)
layers = {n: layers[n] for n in quantizers}
make_quant_linear(model, quantizers, bits, groupsize)
qlayers = find_layers(model, (QuantLinear,))
print("Packing ...")
for name in qlayers:
print(name)
quantizers[name], scale, zero, g_idx, _, _ = quantizers[name]
qlayers[name].pack(layers[name], scale, zero, g_idx)
print("Done.")
return model
def setdeepattr(module, full_name, tensor):
current = module
tokens = full_name.split(".")
for token in tokens[:-1]:
current = getattr(current, token)
setattr(current, tokens[-1], tensor)
def getdeepattr(module, full_name):
current = module
tokens = full_name.split(".")
for token in tokens:
current = getattr(current, token)
return current
def load_weights_pre_hook(module_name, weights, recursive=False):
def inner(module, args):
print(f"Pre hook {module_name}")
local_params = {}
for k, v in module.named_parameters():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for k, v in module.named_buffers():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for local_param in local_params:
current_tensor = getdeepattr(module, local_param)
if current_tensor.device == torch.device("meta"):
# print(f"Loading {local_param}")
if module_name:
tensor_name = f"{module_name}.{local_param}"
else:
tensor_name = local_param
tensor = weights.get_tensor(tensor_name)
setdeepattr(module, local_param, nn.Parameter(tensor))
else:
tensor = current_tensor.to(device=torch.device("cuda:0"))
if current_tensor.requires_grad:
tensor = nn.Parameter(tensor)
setdeepattr(module, local_param, tensor)
return inner
def load_weights_post_hook(module_name, weights, recursive=False):
def inner(module, args, output):
print(f"Post hook {module_name}")
local_params = {}
for k, v in module.named_parameters():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for k, v in module.named_buffers():
if not recursive and k.count(".") != 1:
continue
local_params[k] = v
for local_param in local_params:
# print(f"Unloading {local_param}")
current_tensor = getdeepattr(module, local_param)
setdeepattr(
module,
local_param,
nn.Parameter(current_tensor.to(device=torch.device("cpu"))),
)
return output
return inner
def quantize(
model_id: str,
bits: int,
groupsize: int,
output_dir: str,
revision: str,
trust_remote_code: bool,
upload_to_model_id: Optional[str],
percdamp: float,
act_order: bool,
sym: bool,
):
print("loading model")
config = AutoConfig.from_pretrained(
model_id,
trust_remote_code=trust_remote_code,
)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(
config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code
)
model = model.eval()
print("LOADED model")
files = weight_files(model_id, revision, extension=".safetensors")
process_group, _, _ = initialize_torch_distributed()
weights = Weights(
files,
device=torch.device("cuda:0"),
dtype=torch.float16,
process_group=process_group,
aliases={"embed_tokens.weight": ["lm_head.weight"]},
weights_loader=DefaultWeightsLoader(UnquantizedWeight),
)
hooks = []
for name, module in model.named_modules():
def load(module, name):
def _load():
load_weights_pre_hook(name, weights, recursive=True)(module, None)
return _load
def unload(module, name):
def _unload():
load_weights_post_hook(name, weights, recursive=True)(
module, None, None
)
return _unload
module.load = load(module, name)
module.unload = unload(module, name)
hooks.append(
module.register_forward_pre_hook(load_weights_pre_hook(name, weights))
)
hooks.append(
module.register_forward_hook(load_weights_post_hook(name, weights))
)
model.seqlen = 2048
dataset = "wikitext2"
nsamples = 128
seed = None
dataloader, testloader = get_loaders(
dataset,
nsamples=nsamples,
seed=seed,
model_id=model_id,
seqlen=model.seqlen,
trust_remote_code=trust_remote_code,
)
tick = time.time()
quantizers = sequential(
model,
dataloader,
DEV,
nsamples,
bits,
groupsize,
percdamp=percdamp,
act_order=act_order,
hooks=hooks,
sym=sym,
)
print(time.time() - tick)
pack(model, quantizers, bits, groupsize)
from safetensors.torch import save_file
from transformers.modeling_utils import shard_checkpoint
state_dict = model.state_dict()
state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()}
max_shard_size = "10GB"
shards, index = shard_checkpoint(
state_dict, max_shard_size=max_shard_size, weights_name="model.safetensors"
)
os.makedirs(output_dir, exist_ok=True)
for shard_file, shard in shards.items():
save_file(
shard,
os.path.join(output_dir, shard_file),
metadata={
"format": "pt",
"quantized": "gptq",
"origin": "text-generation-inference",
},
)
if index is None:
path_to_weights = os.path.join(output_dir, "model.safetensors")
logger.info(f"Model weights saved in {path_to_weights}")
else:
save_index_file = "model.safetensors.index.json"
save_index_file = os.path.join(output_dir, save_index_file)
with open(save_index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
logger.info(
f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
f"index located at {save_index_file}."
)
config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code)
config.quantization_config = {
"bits": bits,
"group_size": groupsize,
"damp_percent": percdamp,
"desc_act": act_order,
"static_groups": False,
"sym": sym,
"quant_method": "gptq",
}
config.save_pretrained(output_dir)
logger.info("Saved config")
logger.info("Saving tokenizer")
tokenizer = AutoTokenizer.from_pretrained(
model_id, trust_remote_code=trust_remote_code
)
tokenizer.save_pretrained(output_dir)
logger.info("Saved tokenizer")
if upload_to_model_id:
api = HfApi()
api.upload_folder(
folder_path=output_dir, repo_id=upload_to_model_id, repo_type="model"
)
| text-generation-inference/server/text_generation_server/layers/gptq/quantize.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/gptq/quantize.py",
"repo_id": "text-generation-inference",
"token_count": 16163
} | 225 |
import torch
import torch.distributed
from typing import Optional, Type
from transformers import (
PreTrainedTokenizerBase,
)
from text_generation_server.models import CausalLM
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.pb import generate_pb2
class BloomCausalLMBatch(CausalLMBatch):
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "CausalLMBatch":
batch = super().from_pb(pb=pb, tokenizer=tokenizer, dtype=dtype, device=device)
batch.keys_head_dim_last = False
return batch
class BLOOMSharded(CausalLM):
@property
def batch_type(self) -> Type[CausalLMBatch]:
return BloomCausalLMBatch
def forward(
self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
):
outputs, speculative_logits = self.model.forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=True,
)
logits = outputs.logits
return logits, speculative_logits, outputs.past_key_values
| text-generation-inference/server/text_generation_server/models/bloom.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/bloom.py",
"repo_id": "text-generation-inference",
"token_count": 543
} | 226 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed
from torch import nn
from typing import Optional, List, Tuple
from text_generation_server.layers.tensor_parallel import TensorParallelColumnLinear
from text_generation_server.layers.attention import Seqlen
from text_generation_server.models.custom_modeling.vlm import (
load_text_model,
load_vision_model,
)
class PaliGemmaForConditionalGeneration(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
config.vision_config.quantize = config.quantize
self.vision_tower = load_vision_model(
prefix="vision_tower" if not prefix else f"{prefix}.vision_tower",
config=config.vision_config,
weights=weights,
)
self.post_vision_tower_layernorm = nn.LayerNorm.load(
prefix="vision_tower.vision_model.post_layernorm",
weights=weights,
eps=config.vision_config.layer_norm_eps,
)
self.multi_modal_projector = TensorParallelColumnLinear.load(
config,
prefix="multi_modal_projector.linear",
weights=weights,
bias=True,
)
self.vocab_size = config.vocab_size
self.config = config
text_config = config.text_config
text_config.speculator = config.speculator
text_config.quantize = config.quantize
self.text_model = load_text_model(
prefix="language_model" if not prefix else f"{prefix}.language_model",
config=config.text_config,
weights=weights,
)
self.pad_token_id = (
config.pad_token_id if config.pad_token_id is not None else -1
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor] = None,
lm_head_indices: Optional[torch.Tensor] = None,
pixel_values: torch.FloatTensor = None,
# Unused here
pixel_attention_mask: Optional[torch.BoolTensor] = None,
image_sizes: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
inputs_embeds = self.text_model.embed_tokens(input_ids)
# TODO This is odd but apparently pali gemma position ids start at 1.
if cu_seqlen_prefill is not None:
max_s += 1
position_ids += 1
if pixel_values is not None:
pixel_values = pixel_values.to(dtype=inputs_embeds.dtype)
image_outputs = self.vision_tower(pixel_values)
last_hidden_state = self.post_vision_tower_layernorm(
image_outputs.last_hidden_state
)
image_features = self.multi_modal_projector(last_hidden_state)
# mask where image or padding tokens
mask = input_ids == self.config.image_token_index
# insert image features into input embeddings
inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1])
hidden_states = self.text_model.model(
inputs_embeds=inputs_embeds,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits, speculative_logits = self.text_model.lm_head(hidden_states)
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_pali_gemma_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 1938
} | 227 |
# coding=utf-8
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPTNeoX model."""
from typing import Optional, Tuple, Union
import os
import torch
import torch.distributed
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
SpeculativeHead,
)
CUSTOM_KERNELS_ENABLED = False
if (
torch.cuda.is_available()
and not os.environ.get("DISABLE_CUSTOM_KERNELS", "False") == "True"
):
try:
from custom_kernels import fused_attention_cuda
CUSTOM_KERNELS_ENABLED = True
except ImportError:
pass
def make_causal_mask(
input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
) -> torch.BoolTensor:
"""
Make causal mask used for self-attention.
"""
batch_size, target_length = input_ids_shape
mask = torch.ones(
(target_length, target_length + past_key_values_length),
dtype=torch.bool,
device=device,
)
mask = mask.triu(1 + past_key_values_length)
expanded_mask = mask.unsqueeze(0).expand(
batch_size, target_length, target_length + past_key_values_length
)
return expanded_mask
def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
"""
Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`.
"""
batch_size, src_length = mask.shape
tgt_length = tgt_length if tgt_length is not None else src_length
expanded_mask = ~(mask[:, None, :].to(torch.bool))
return expanded_mask.expand(batch_size, tgt_length, src_length)
def prepare_attn_mask(
attention_mask: torch.Tensor,
input_shape: Tuple[int, int],
past_key_values_length: int,
) -> torch.BoolTensor:
# create causal mask
# [batch_size, seq_length] -> [batch_size, tgt_length, src_length]
combined_attention_mask = None
device = attention_mask.device
_, src_length = input_shape
if src_length > 1:
combined_attention_mask = make_causal_mask(
input_shape, device=device, past_key_values_length=past_key_values_length
)
# [batch_size, seq_length] -> [batch_size, tgt_length, src_length]
expanded_attn_mask = expand_mask(attention_mask, tgt_length=src_length)
combined_attention_mask = (
expanded_attn_mask
if combined_attention_mask is None
else expanded_attn_mask | combined_attention_mask
)
return combined_attention_mask
class GPTNeoXPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
class GPTNeoXAttention(nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_attention_heads
self.rotary_ndims = int(self.head_size * config.rotary_pct)
# ??? TODO
# self.register_buffer(
# "bias",
# torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
# 1, 1, max_positions, max_positions
# ),
# )
# self.register_buffer("masked_bias", torch.tensor(-1e9))
self.rotary_emb = RotaryEmbedding(
self.rotary_ndims,
config.max_position_embeddings,
base=config.rotary_emb_base,
)
self.rotary_emb.inv_freq = nn.Parameter(
weights.get_tensor(f"{prefix}.rotary_emb.inv_freq")
)
self.inv_norm_factor = 1.0 / torch.sqrt(
torch.tensor(self.head_size, dtype=torch.float32)
).to(torch.get_default_dtype())
if self.num_attention_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_attention_heads` must be divisible by `num_shards` "
f"(got `num_attention_heads`: {self.num_attention_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_attention_heads = (
self.num_attention_heads // weights.process_group.size()
)
self.query_key_value = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.query_key_value", weights=weights, bias=True
)
self.dense = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.dense", weights=weights, bias=True
)
def forward(
self,
hidden_states,
position_ids,
attention_mask,
head_mask=None,
layer_past=None,
use_cache=False,
output_attentions=False,
):
has_layer_past = layer_past is not None
# Compute QKV
# Attention heads [batch, seq_len, hidden_size]
# --> [batch, seq_len, (np * 3 * head_size)]
qkv = self.query_key_value(hidden_states)
# [batch, seq_len, (num_heads * 3 * head_size)]
# --> [batch, seq_len, num_heads, 3 * head_size]
new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)
qkv = qkv.view(*new_qkv_shape).permute(0, 2, 1, 3)
# [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size]
query, key, value = qkv.split(self.head_size, -1)
# Compute token offset for rotary embeddings (when decoding)
seq_len = key.shape[-2]
if has_layer_past:
seq_len += layer_past[0].shape[-2]
# Compute rotary embeddings on rotary_ndims
query_rot = query[..., : self.rotary_ndims]
key_rot = key[..., : self.rotary_ndims]
query_rot, key_rot = self.rotary_emb(query_rot, key_rot, position_ids, seq_len)
query[..., : self.rotary_ndims] = query_rot
key[..., : self.rotary_ndims] = key_rot
if CUSTOM_KERNELS_ENABLED:
attn_output, present, attn_weights = fused_attention_cuda.forward(
query,
key,
value,
layer_past,
attention_mask,
head_mask,
self.inv_norm_factor,
self.num_attention_heads,
use_cache,
)
else:
# Cache QKV values
if has_layer_past:
past_key = layer_past[0]
past_value = layer_past[1]
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
present = (key, value) if use_cache else None
# Compute attention
attn_output, attn_weights = self._attn(
query, key, value, attention_mask, head_mask
)
# Reshape outputs
attn_output = self._merge_heads(
attn_output, self.num_attention_heads, self.head_size
)
attn_output = self.dense(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs
@classmethod
def _split_heads(cls, tensor, num_attention_heads, attn_head_size):
"""
Splits hidden dim into attn_head_size and num_attention_heads
"""
# tensor: [bs, seq_len, hidden_size]
new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
# -> [bs, seq_len, num_attention_heads, attn_head_size]
tensor = tensor.view(new_shape)
# -> [bs, num_attention_heads, seq_len, attn_head_size]
tensor = tensor.permute(0, 2, 1, 3)
return tensor
@classmethod
def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden dim
"""
# tensor [bs, num_attention_heads, seq_len, attn_head_size]
tensor = tensor.permute(0, 2, 1, 3).contiguous()
# -> [bs, seq_len, num_attention_heads, attn_head_size]
tensor = tensor.view(
tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size
)
# -> [bs, seq_len, hidden_size]
return tensor
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
# q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]
# compute causal mask from causal mask buffer
batch_size, num_attention_heads, query_length, attn_head_size = query.size()
key_length = key.size(-2)
query = query.reshape(
batch_size * num_attention_heads, query_length, attn_head_size
)
key = key.reshape(batch_size * num_attention_heads, key_length, attn_head_size)
attn_scores = torch.zeros(
1,
dtype=query.dtype,
device=key.device,
).expand(batch_size * num_attention_heads, query_length, key_length)
attn_scores = torch.baddbmm(
attn_scores,
query,
key.transpose(1, 2),
beta=1.0,
alpha=self.inv_norm_factor,
)
# cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length]
input_dtype = attn_scores.dtype
if input_dtype in [torch.float16, torch.bfloat16]:
attn_scores = attn_scores.to(torch.float)
attn_scores = torch.where(
attention_mask, torch.finfo(attn_scores.dtype).min, attn_scores
)
attn_scores = attn_scores.view(
batch_size, num_attention_heads, query_length, key_length
)
attn_weights = nn.functional.softmax(attn_scores, dim=-1)
attn_weights = attn_weights.to(value.dtype)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
class RotaryEmbedding(torch.nn.Module):
def __init__(self, dim, max_position_embeddings, base=10000, device=None):
super().__init__()
self.true_inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2).float().to(device) / dim)
)
self.register_buffer("inv_freq", self.true_inv_freq)
# Build here to make `torch.jit.trace` work.
self.max_seq_len_cached = max_position_embeddings
self.cos_cached = None
self.sin_cached = None
@staticmethod
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@staticmethod
def _create_cos_sin(inv_freq, max_position_embeddings, dtype, device):
t = torch.arange(
max_position_embeddings, device=inv_freq.device, dtype=inv_freq.dtype
)
freqs = torch.einsum("i,j->ij", t, inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
return emb.cos().to(device).to(dtype), emb.sin().to(device).to(dtype)
def forward(self, q, k, position_ids, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
if (
seq_len > self.max_seq_len_cached
or self.cos_cached is None
or self.sin_cached is None
):
if seq_len > self.max_seq_len_cached:
self.max_seq_len_cached = seq_len
self.cos_cached, self.sin_cached = self._create_cos_sin(
self.true_inv_freq, self.max_seq_len_cached, q.dtype, q.device
)
return rotary_forward(q, k, self.cos_cached, self.sin_cached, position_ids)
@torch.jit.script
def rotary_forward(q, k, cos, sin, position_ids):
cos = cos[position_ids].unsqueeze(1)
sin = sin[position_ids].unsqueeze(1)
chunk_size = q.shape[-1] // 2
q1, q2 = q.split(chunk_size, -1)
q_rotated = torch.cat((-q2, q1), dim=-1)
k1, k2 = k.split(chunk_size, -1)
k_rotated = torch.cat((-k2, k1), dim=-1)
q_embed = (q * cos) + (q_rotated * sin)
k_embed = (k * cos) + (k_rotated * sin)
return q_embed, k_embed
class GPTNeoXMLP(nn.Module):
def __init__(self, config, prefix, weights):
super().__init__()
self.act = (
ACT2FN[config.hidden_act]
if "gelu_fast" not in config.hidden_act
else lambda x: torch.nn.functional.gelu(x, approximate="tanh")
)
self.dense_h_to_4h = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.dense_h_to_4h", weights=weights, bias=True
)
self.dense_4h_to_h = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.dense_4h_to_h", weights=weights, bias=True
)
def forward(self, hidden_states):
hidden_states = self.dense_h_to_4h(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.dense_4h_to_h(hidden_states)
return hidden_states
class GPTNeoXLayer(nn.Module):
def __init__(self, layer_id, prefix: str, config, weights):
super().__init__()
self.use_parallel_residual = config.use_parallel_residual
self.input_layernorm = nn.LayerNorm.load(
prefix=f"{prefix}.layers.{layer_id}.input_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.post_attention_layernorm = nn.LayerNorm.load(
prefix=f"{prefix}.layers.{layer_id}.post_attention_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
self.attention = GPTNeoXAttention(
config, prefix=f"{prefix}.layers.{layer_id}.attention", weights=weights
)
self.mlp = GPTNeoXMLP(
config, prefix=f"{prefix}.layers.{layer_id}.mlp", weights=weights
)
def forward(
self,
hidden_states,
position_ids,
attention_mask=None,
head_mask=None,
use_cache=False,
layer_past=None,
output_attentions=False,
):
attention_layer_outputs = self.attention(
self.input_layernorm(hidden_states),
attention_mask=attention_mask,
position_ids=position_ids,
layer_past=layer_past,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attention_layer_outputs[
0
] # output_attn: attn_output, present, (attn_weights)
outputs = attention_layer_outputs[1:]
if self.use_parallel_residual:
# pseudocode:
# x = x + attn(ln1(x)) + mlp(ln2(x))
mlp_output = self.mlp(self.post_attention_layernorm(hidden_states))
hidden_states = mlp_output + attn_output + hidden_states
else:
# pseudocode:
# x = x + attn(ln1(x))
# x = x + mlp(ln2(x))
attn_output = attn_output + hidden_states
mlp_output = self.mlp(self.post_attention_layernorm(attn_output))
hidden_states = mlp_output + attn_output
if use_cache:
outputs = (
hidden_states,
) + outputs # hidden_states, present, (attn_weights)
else:
outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)
return outputs
class GPTNeoXModel(GPTNeoXPreTrainedModel):
def __init__(self, prefix: str, config, weights):
super().__init__(config)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.embed_in = TensorParallelEmbedding(
prefix=f"{prefix}.embed_in", weights=weights
)
self.layers = nn.ModuleList(
[
GPTNeoXLayer(layer_id, prefix, config, weights)
for layer_id in range(config.num_hidden_layers)
]
)
self.final_layer_norm = nn.LayerNorm.load(
prefix=f"{prefix}.final_layer_norm",
weights=weights,
eps=config.layer_norm_eps,
)
self.tp_world_size = weights.process_group.size()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids=None,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
r"""
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * self.config.num_hidden_layers)
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(
past_length, seq_length + past_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
else:
position_ids = position_ids.view(-1, seq_length).long()
if inputs_embeds is None:
inputs_embeds = self.embed_in(input_ids)
hidden_states = inputs_embeds
# Attention mask.
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values[0] is not None:
past_key_values_length = past_key_values[0][0].shape[-1]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones(
(batch_size, seq_length_with_past), device=hidden_states.device
)
else:
attention_mask = attention_mask.to(hidden_states.device)
causal_mask = prepare_attn_mask(
attention_mask,
input_shape=(batch_size, seq_length),
past_key_values_length=past_key_values_length,
)
assert self.num_attention_heads % self.tp_world_size == 0
block_size = self.num_attention_heads // self.tp_world_size
causal_mask = torch.repeat_interleave(causal_mask, block_size, dim=0)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
presents = () if use_cache else None
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = layer(
hidden_states,
position_ids=position_ids,
attention_mask=causal_mask,
head_mask=head_mask[i],
layer_past=layer_past,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
hidden_states = self.final_layer_norm(hidden_states)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, presents, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class GPTNeoxForCausalLM(GPTNeoXPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, prefix: str, config, weights):
super().__init__(config)
if not prefix:
prefix = "gpt_neox"
else:
prefix = f"{prefix}.gpt_neox"
self.gpt_neox = GPTNeoXModel(prefix, config, weights)
self.embed_out = SpeculativeHead.load(
config, prefix="embed_out", weights=weights
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are
only required when the model is used as a decoder in a Sequence to Sequence model.
Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see
`past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
>>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b")
>>> config.is_decoder = True
>>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.gpt_neox(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
lm_logits, speculative_logits = self.embed_out(hidden_states)
lm_loss = None
if labels is not None:
# move labels to correct device to enable model parallelism
labels = labels.to(lm_logits.device)
# we are doing next-token prediction; shift prediction scores and input ids by one
shift_logits = lm_logits[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)
)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((lm_loss,) + output) if lm_loss is not None else output
return (
CausalLMOutputWithPast(
loss=lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
),
speculative_logits,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
**kwargs,
):
input_shape = input_ids.shape
# cut decoder_input_ids if past is used
if past_key_values and past_key_values[0] is not None:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"attention_mask": attention_mask,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
)
return model_inputs
def _reorder_cache(self, past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx)
for past_state in layer_past[:2]
)
+ layer_past[2:],
)
return reordered_past
| text-generation-inference/server/text_generation_server/models/custom_modeling/neox_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/neox_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 14228
} | 228 |
import torch
from PIL import Image
from io import BytesIO
from opentelemetry import trace
from typing import Iterable, Optional, Tuple, List, Type, Dict
from transformers import PreTrainedTokenizerBase
from transformers.image_processing_utils import select_best_resolution
from text_generation_server.pb import generate_pb2
from text_generation_server.models.flash_causal_lm import (
FlashCausalLMBatch,
FlashCausalLM,
block_tables_to_ragged,
)
from text_generation_server.models.globals import PREFIX_CACHING, ATTENTION
from text_generation_server.utils.log import log_master
from transformers import AutoProcessor
from text_generation_server.layers.attention import Seqlen
tracer = trace.get_tracer(__name__)
IDEFICS2_FAKE_TOKEN = "<fake_token_around_image>"
IDEFICS2_IMAGE_TOKEN = "<image>"
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
"""
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (height, width).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (width, height).
"""
if not isinstance(grid_pinpoints, list):
raise ValueError("grid_pinpoints should be a list of tuples or lists")
height, width = select_best_resolution(image_size, grid_pinpoints)
return height // patch_size, width // patch_size
def image_text_replacement(processor, image_input, config, image_id: int) -> str:
if config.model_type == "idefics2":
image_seq_len = 64
image_str = f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_IMAGE_TOKEN * image_seq_len}{IDEFICS2_FAKE_TOKEN}"
if processor.image_processor.do_image_splitting:
image_str *= 5
return image_str
elif config.model_type == "llava_next":
height, width = image_input["image_sizes"][image_id]
num_features = get_number_of_features(height, width, config)
from loguru import logger
log_master(
logger.info,
f"Found {num_features} features in image of resolution {height}x{width}",
)
return "<image>" * num_features
elif config.model_type == "paligemma":
return "<image>" * config.text_config.num_image_tokens
else:
raise RuntimeError(f"Unknown config {config.model_type} for multimodal")
def image_text_replacement_fixup(config, text: str) -> str:
if config.model_type == "idefics2":
return text.replace(
f"{IDEFICS2_FAKE_TOKEN}{IDEFICS2_FAKE_TOKEN}", IDEFICS2_FAKE_TOKEN
)
return text
def get_unpadded_features(
original_height: int,
original_width: int,
npatches: int,
num_patch_height: int,
num_patch_width: int,
) -> Tuple[int, int]:
current_height = npatches * num_patch_height
current_width = npatches * num_patch_width
aspect_ratio: float = original_width / original_height
current_aspect_ratio: float = current_width / current_height
if aspect_ratio > current_aspect_ratio:
new_height = (original_height * current_width) // original_width
padding = (current_height - new_height) // 2
current_height = current_height - (2 * padding)
else:
new_width = (original_width * current_height) // original_height
padding = (current_width - new_width) // 2
current_width = current_width - (2 * padding)
unpadded_features = current_height * current_width
newline_features = current_height
return (unpadded_features, newline_features)
def get_number_of_features(height: int, width: int, config) -> int:
# From config
# Hardcoded for CLIP for now
# image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]
image_grid_pinpoints = config.image_grid_pinpoints
image_size = config.vision_config.image_size
patch_size = config.vision_config.patch_size
assert image_size % patch_size == 0
npatches = image_size // patch_size
# Dimensions are intentionally swapped to be bug-compatible with
# upstream: https://github.com/LLaVA-VL/LLaVA-NeXT/issues/59
num_patch_width, num_patch_height = get_anyres_image_grid_shape(
[height, width],
image_grid_pinpoints,
image_size,
)
unpadded_features, newline_features = get_unpadded_features(
height, width, npatches, num_patch_height, num_patch_width
)
# The base patch covers the entire image
base_features = npatches**2
return unpadded_features + newline_features + base_features
class VlmCausalLMBatch(FlashCausalLMBatch):
pixel_values: Optional[List[torch.Tensor]]
pixel_attention_mask: Optional[List[torch.Tensor]]
image_sizes: Optional[List[Tuple[int, int]]]
@classmethod
@tracer.start_as_current_span("concatenate")
def concatenate(cls, batches):
batch = super(VlmCausalLMBatch, cls).concatenate(batches)
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
return batch
@tracer.start_as_current_span("filter")
def filter(self, request_ids: List[int]):
batch = super().filter(request_ids)
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
return batch
@classmethod
def batch_tokenized_inputs(
cls, requests: Iterable[generate_pb2.Request], tokenizer, processor, config
):
# Process images first. We need all of them so that the processor
# can make the image splits the same size. And we need the final
# sizes to insert correct number of image tokens.
images = []
for r in requests:
for chunk in r.input_chunks.chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
pass
elif chunk_type == "image":
image = Image.open(BytesIO(chunk.image.data))
if config.model_type == "llava_next":
images.append(image)
else:
images.append([image])
else:
raise RuntimeError(f"Invalid chunk type {chunk_type}")
if images:
image_inputs = processor.image_processor(images, return_tensors="pt")
else:
image_inputs = None
batch_inputs = []
max_truncation = 0
image_id = 0
for r in requests:
full_text = ""
for chunk in r.input_chunks.chunks:
chunk_type = chunk.WhichOneof("chunk")
if chunk_type == "text":
full_text += chunk.text
elif chunk_type == "image":
full_text += image_text_replacement(
processor, image_inputs, config, image_id
)
image_id += 1
full_text = image_text_replacement_fixup(config, full_text)
batch_inputs.append(full_text)
max_truncation = max(max_truncation, r.truncate)
batch_tokenized_inputs = tokenizer(
batch_inputs,
truncation=True,
max_length=max_truncation,
add_special_tokens=not config.model_type == "paligemma",
)["input_ids"]
return batch_tokenized_inputs, image_inputs
@classmethod
def from_pb_processor(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
processor,
config,
dtype: torch.dtype,
device: torch.device,
) -> "VlmCausalLMBatch":
batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
pb.requests, tokenizer, processor, config
)
batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
if image_inputs is not None:
batch.pixel_values = image_inputs["pixel_values"].to(device=device)
if "pixel_attention_mask" in image_inputs:
batch.pixel_attention_mask = image_inputs["pixel_attention_mask"].to(
device=device
)
else:
batch.pixel_attention_mask = None
if "image_sizes" in image_inputs:
batch.image_sizes = image_inputs["image_sizes"].to(device=device)
else:
batch.image_sizes = None
else:
batch.pixel_values = None
batch.pixel_attention_mask = None
batch.image_sizes = None
return batch
class VlmCausalLM(FlashCausalLM):
def __init__(
self,
model_id: str,
*,
processor_class=AutoProcessor,
processor_kwargs=None,
batch_class=VlmCausalLMBatch,
revision,
trust_remote_code: bool,
**kwargs,
):
if PREFIX_CACHING:
raise NotImplementedError("Vlm do not work with prefix caching yet")
if processor_kwargs is None:
processor_kwargs = {}
self.processor = processor_class.from_pretrained(
model_id,
revision=revision,
trust_remote_code=trust_remote_code,
**processor_kwargs,
)
self.batch_class = batch_class
super().__init__(
model_id=model_id,
revision=revision,
trust_remote_code=trust_remote_code,
**kwargs,
)
@property
def batch_type(self) -> Type[VlmCausalLMBatch]:
return self.batch_class
def max_past(self) -> Optional[int]:
return getattr(self.model.text_model, "max_past", None)
def forward(
self,
batch: VlmCausalLMBatch,
adapter_data: Optional[Dict[str, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
# Model Forward
if batch.speculative_ids is not None:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
max_s = batch.max_seqlen
lm_head_indices = batch.prefill_head_indices
speculative_ids = batch.speculative_ids
B, speculative_length = speculative_ids.shape
new_length = speculative_length + 1
new_input_ids = torch.cat(
[input_ids.unsqueeze(-1), speculative_ids], dim=1
).reshape(-1)
arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0)
arange_int = arange.to(dtype=torch.int32)
new_position_ids = (
position_ids.unsqueeze(-1).expand(B, new_length) + arange
).view(-1)
slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1)
input_lengths = (
input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int
).view(-1)
prefix_lens_tensor = (
batch.prefix_lens_tensor.unsqueeze(-1).expand(B, new_length)
).reshape(-1)
# Add Copy the block tables for all members
block_tables = (
block_tables.unsqueeze(1)
.expand(B, new_length, -1)
.reshape(B * new_length, -1)
.contiguous()
)
max_s = max_s + speculative_length
input_ids = new_input_ids
position_ids = new_position_ids
else:
input_ids = batch.input_ids
position_ids = batch.position_ids
cu_seqlen_prefill = batch.cu_seqlen_prefill
kv_cache = self.kv_cache
block_tables = batch.block_tables_tensor
slots = batch.slots[batch.slot_indices]
input_lengths = batch.input_lengths_tensor
prefix_lens_tensor = batch.prefix_lens_tensor
max_s = batch.max_seqlen
lm_head_indices = batch.prefill_head_indices
if cu_seqlen_prefill is None and self.max_past() is not None:
# In decode, not prefill, we're actually overwriting the KV-cache
# in a circular buffer mode.
# This makes sure the max_s for the decode pass is correct.
max_s = min(self.max_past(), max_s)
bs = input_ids.shape[0]
# Try to find an associated cuda graph
bs = input_ids.shape[0]
sorted_padded_bs = sorted([k for k in self.cuda_graphs.keys() if k >= bs])
if sorted_padded_bs:
# Get associated cuda graph
cuda_graph = self.cuda_graphs[sorted_padded_bs[0]]
else:
cuda_graph = None
if cu_seqlen_prefill is not None or cuda_graph is None:
input_lengths = input_lengths + prefix_lens_tensor
if PREFIX_CACHING:
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
prefix_lens=batch.prefix_lens,
)
with self._forward_context(
block_tables=block_tables,
cu_seqlen_prefill=cu_seqlen_prefill,
input_lengths=batch.input_lengths,
input_lengths_tensor=input_lengths,
prefix_lens=batch.prefix_lens,
prefix_lens_tensor=prefix_lens_tensor,
):
max_k = (input_lengths + prefix_lens_tensor).max().item()
seqlen = Seqlen(
input_lengths=input_lengths,
prefix_lengths=prefix_lens_tensor,
cu_seqlen_q=cu_seqlen_prefill,
max_q=max_s,
max_k=max_k,
)
logits, speculative_logits = self.model.forward(
input_ids=input_ids,
position_ids=position_ids,
cu_seqlen_prefill=cu_seqlen_prefill,
kv_cache=kv_cache,
block_tables=block_tables,
slots=slots,
seqlen=seqlen,
max_s=max_s,
prefill_cache_indices=batch.prefill_cache_indices,
lm_head_indices=lm_head_indices,
pixel_values=batch.pixel_values,
pixel_attention_mask=batch.pixel_attention_mask,
image_sizes=batch.image_sizes,
)
if batch.prefill_cache_indices is not None:
batch.prefill_cache_indices = None
if batch.pixel_values is not None:
batch.pixel_values = None
if batch.pixel_attention_mask is not None:
batch.pixel_attention_mask = None
if batch.image_sizes is not None:
batch.image_sizes = None
return logits, speculative_logits
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids
cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids
if ATTENTION == "flashinfer":
block_tables = block_tables_to_ragged(
block_tables=block_tables,
input_lengths=batch.input_lengths,
prefix_lens=batch.prefix_lens,
)
cuda_graph["block_tables"][: block_tables.shape[0]] = block_tables
else:
cuda_graph["block_tables"][
: block_tables.shape[0], : block_tables.shape[1]
] = block_tables
cuda_graph["slots"].fill_(-1)
cuda_graph["slots"][: slots.shape[0]] = slots
cuda_graph["input_lengths"].zero_()
cuda_graph["input_lengths"][: input_lengths.shape[0]] = (
input_lengths + prefix_lens_tensor
)
# Replay the graph
cuda_graph["graph"].replay()
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
| text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/vlm_causal_lm.py",
"repo_id": "text-generation-inference",
"token_count": 8077
} | 229 |
import json
import os
from dataclasses import dataclass
from typing import Optional
from huggingface_hub import hf_hub_download
from text_generation_server.layers.marlin.gptq import can_use_gptq_marlin
from text_generation_server.utils.weights import (
DefaultWeightsLoader,
WeightsLoader,
)
# TODO: Split this config to have a single config type per quant method
@dataclass
class _QuantizerConfig:
bits: int
checkpoint_format: Optional[str]
desc_act: bool
groupsize: int
quant_method: str
sym: bool
@dataclass
class _FP8QuantizerConfig:
activation_scale_ub: float
# We should probably do this with Pytantic JSON deserialization,
# but for now we'll stay close to the old _set_gptq_params.
def _get_quantizer_config(model_id, revision):
bits = 4
groupsize = -1
quant_method = "gptq"
checkpoint_format = None
sym = False
desc_act = False
filename = "config.json"
try:
if os.path.exists(os.path.join(model_id, filename)):
filename = os.path.join(model_id, filename)
else:
filename = hf_hub_download(model_id, filename=filename, revision=revision)
with open(filename, "r") as f:
data = json.load(f)
# FP8 config
if data["quantization_config"]["quant_method"] == "fbgemm_fp8":
return _FP8QuantizerConfig(
activation_scale_ub=data["quantization_config"]["activation_scale_ub"]
)
if "zero_point" in data["quantization_config"]:
sym = not data["quantization_config"]["zero_point"]
quant_method = "awq"
elif "sym" in data["quantization_config"]:
sym = data["quantization_config"]["sym"]
bits = data["quantization_config"]["bits"]
groupsize = data["quantization_config"]["group_size"]
# Order is important here, desc_act is missing on some real models
quant_method = data["quantization_config"]["quant_method"]
checkpoint_format = data["quantization_config"].get("checkpoint_format")
desc_act = data["quantization_config"]["desc_act"]
except Exception:
filename = "quantize_config.json"
try:
if os.path.exists(os.path.join(model_id, filename)):
filename = os.path.join(model_id, filename)
else:
filename = hf_hub_download(
model_id, filename=filename, revision=revision
)
with open(filename, "r") as f:
data = json.load(f)
bits = data["bits"]
groupsize = data["group_size"]
if "zero_point" in data:
sym = not data["zero_point"]
quant_method = "awq"
elif "sym" in data:
sym = data["sym"]
desc_act = data["desc_act"]
if "version" in data and data["version"] == "GEMM":
quant_method = "awq"
except Exception:
filename = "quant_config.json"
try:
if os.path.exists(os.path.join(model_id, filename)):
filename = os.path.join(model_id, filename)
else:
filename = hf_hub_download(
model_id, filename=filename, revision=revision
)
with open(filename, "r") as f:
data = json.load(f)
bits = data["w_bit"]
groupsize = data["q_group_size"]
desc_act = data["desc_act"]
if "version" in data and data["version"] == "GEMM":
quant_method = "awq"
except Exception:
pass
return _QuantizerConfig(
bits=bits,
groupsize=groupsize,
quant_method=quant_method,
checkpoint_format=checkpoint_format,
sym=sym,
desc_act=desc_act,
)
def get_loader(
quantize: Optional[str], model_id: str, revision: Optional[str]
) -> WeightsLoader:
quantizer_config = _get_quantizer_config(model_id, revision)
if quantize in {"awq", "gptq"}:
from text_generation_server.layers.gptq import GPTQWeightsLoader
# TODO: improve check once we have one config type per quantize value
if not isinstance(quantizer_config, _QuantizerConfig):
raise ValueError(
f"Quantize is set to `{quantize}` but received a `{quantizer_config.__class__.__name__}` config."
)
if can_use_gptq_marlin(
bits=quantizer_config.bits,
groupsize=quantizer_config.groupsize,
quant_method=quantizer_config.quant_method,
quantize=quantize,
sym=quantizer_config.sym,
):
from text_generation_server.layers.marlin import GPTQMarlinWeightsLoader
return GPTQMarlinWeightsLoader(
bits=quantizer_config.bits,
desc_act=quantizer_config.desc_act,
groupsize=quantizer_config.groupsize,
quant_method=quantizer_config.quant_method,
quantize=quantize,
sym=quantizer_config.sym,
)
else:
return GPTQWeightsLoader(
bits=quantizer_config.bits,
desc_act=quantizer_config.desc_act,
groupsize=quantizer_config.groupsize,
quant_method=quantizer_config.quant_method,
quantize=quantize,
sym=quantizer_config.sym,
)
elif quantize == "bitsandbytes":
from text_generation_server.layers.bnb import BNBWeight
return DefaultWeightsLoader(BNBWeight)
elif quantize == "bitsandbytes-fp4":
from text_generation_server.layers.bnb import BNBFP4Weight
return DefaultWeightsLoader(BNBFP4Weight)
elif quantize == "bitsandbytes-nf4":
from text_generation_server.layers.bnb import BNBNF4Weight
return DefaultWeightsLoader(BNBNF4Weight)
elif quantize == "eetq":
from text_generation_server.layers.eetq import EETQWeight
return DefaultWeightsLoader(EETQWeight)
elif quantize == "exl2":
from text_generation_server.layers.exl2 import Exl2WeightsLoader
return Exl2WeightsLoader()
elif quantize == "marlin":
from text_generation_server.layers.marlin import MarlinWeightsLoader
# TODO: improve check once we have one config type per quantize value
if not isinstance(quantizer_config, _QuantizerConfig):
raise ValueError(
f"Quantize is set to `{quantize}` but received a `{quantizer_config.__class__.__name__}` config."
)
return MarlinWeightsLoader(
bits=quantizer_config.bits,
is_marlin_24=quantizer_config.checkpoint_format == "marlin_24",
)
elif quantize == "fp8" or quantize is None:
from text_generation_server.layers.fp8 import HybridFP8UnquantLoader
# Since the default for the quantize config is _QuantizerConfig,
# we need to add this check to not get an attribute error
activation_scale_ub = None
if isinstance(quantizer_config, _FP8QuantizerConfig):
activation_scale_ub = quantizer_config.activation_scale_ub
return HybridFP8UnquantLoader(activation_scale_ub, to_fp8=quantize == "fp8")
else:
raise ValueError(f"Unknown quantization method: {quantize}")
| text-generation-inference/server/text_generation_server/utils/quantization.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/quantization.py",
"repo_id": "text-generation-inference",
"token_count": 3406
} | 230 |
target
.yarn | tokenizers/bindings/node/.prettierignore/0 | {
"file_path": "tokenizers/bindings/node/.prettierignore",
"repo_id": "tokenizers",
"token_count": 5
} | 231 |
{
"name": "tokenizers-win32-ia32-msvc",
"version": "0.13.4-rc1",
"os": [
"win32"
],
"cpu": [
"ia32"
],
"main": "tokenizers.win32-ia32-msvc.node",
"files": [
"tokenizers.win32-ia32-msvc.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/win32-ia32-msvc/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-ia32-msvc/package.json",
"repo_id": "tokenizers",
"token_count": 277
} | 232 |
use crate::decoders::Decoder;
use crate::encoding::{JsEncoding, JsTruncationDirection, JsTruncationStrategy};
use crate::models::Model;
use crate::normalizers::Normalizer;
use crate::pre_tokenizers::PreTokenizer;
use crate::processors::Processor;
use crate::tasks::tokenizer::{DecodeBatchTask, DecodeTask, EncodeBatchTask, EncodeTask};
use crate::trainers::Trainer;
use std::collections::HashMap;
use tokenizers::Model as ModelTrait;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tokenizers as tk;
#[napi]
#[derive(Default)]
pub enum PaddingDirection {
#[default]
Left,
Right,
}
impl From<PaddingDirection> for tk::PaddingDirection {
fn from(w: PaddingDirection) -> Self {
match w {
PaddingDirection::Left => tk::PaddingDirection::Left,
PaddingDirection::Right => tk::PaddingDirection::Right,
}
}
}
impl TryFrom<String> for PaddingDirection {
type Error = Error;
fn try_from(w: String) -> Result<Self> {
match w.as_str() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
s => Err(Error::from_reason(format!(
"{s:?} is not a valid direction"
))),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct PaddingOptions {
pub max_length: Option<u32>,
pub direction: Option<Either<String, PaddingDirection>>,
pub pad_to_multiple_of: Option<u32>,
pub pad_id: Option<u32>,
pub pad_type_id: Option<u32>,
pub pad_token: Option<String>,
}
impl TryFrom<PaddingOptions> for tk::PaddingParams {
type Error = Error;
fn try_from(value: PaddingOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: PaddingDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => tk::PaddingDirection::Right,
};
Ok(Self {
pad_to_multiple_of: value.pad_to_multiple_of.map(|s| s as usize),
pad_id: value.pad_id.unwrap_or_default(),
pad_type_id: value.pad_type_id.unwrap_or_default(),
pad_token: value.pad_token.unwrap_or("[PAD]".to_string()),
direction,
strategy: match value.max_length {
Some(length) => tk::PaddingStrategy::Fixed(length as usize),
None => tk::PaddingStrategy::BatchLongest,
},
})
}
}
#[napi(object)]
#[derive(Default)]
pub struct EncodeOptions {
pub is_pretokenized: Option<bool>,
pub add_special_tokens: Option<bool>,
}
#[derive(Default)]
struct EncodeOptionsDef {
// TODO
// is_pretokenized: bool,
add_special_tokens: bool,
}
impl From<EncodeOptions> for EncodeOptionsDef {
fn from(value: EncodeOptions) -> Self {
EncodeOptionsDef {
// TODO
// is_pretokenized: value.is_pretokenized.unwrap_or(false),
add_special_tokens: value.add_special_tokens.unwrap_or(true),
}
}
}
#[napi(object)]
#[derive(Default)]
pub struct TruncationOptions {
pub max_length: Option<u32>,
pub strategy: Option<JsTruncationStrategy>,
pub direction: Option<Either<String, JsTruncationDirection>>,
pub stride: Option<u32>,
}
impl TryFrom<TruncationOptions> for tk::TruncationParams {
type Error = Error;
fn try_from(value: TruncationOptions) -> Result<Self> {
let direction = match value.direction {
Some(either) => match either {
Either::A(string) => {
let direction: JsTruncationDirection = string.try_into()?;
direction.into()
}
Either::B(direction) => direction.into(),
},
None => Default::default(),
};
Ok(Self {
max_length: value.max_length.unwrap_or(0) as usize,
strategy: value.strategy.map(|s| s.into()).unwrap_or_default(),
direction,
stride: value.stride.unwrap_or_default() as usize,
})
}
}
#[napi(object)]
pub struct AddedTokenOptions {
pub single_word: Option<bool>,
pub left_strip: Option<bool>,
pub right_strip: Option<bool>,
pub normalized: Option<bool>,
}
#[napi]
#[derive(Clone)]
pub struct AddedToken {
token: tk::AddedToken,
}
#[napi]
impl AddedToken {
#[napi(constructor)]
pub fn from(token: String, is_special: bool, options: Option<AddedTokenOptions>) -> Self {
let mut token = tk::AddedToken::from(token, is_special);
if let Some(options) = options {
if let Some(sw) = options.single_word {
token = token.single_word(sw);
}
if let Some(ls) = options.left_strip {
token = token.lstrip(ls);
}
if let Some(rs) = options.right_strip {
token = token.rstrip(rs);
}
if let Some(n) = options.normalized {
token = token.normalized(n);
}
}
Self { token }
}
#[napi]
pub fn get_content(&self) -> String {
self.token.content.clone()
}
}
impl From<AddedToken> for tk::AddedToken {
fn from(v: AddedToken) -> Self {
v.token
}
}
type RsTokenizer = tk::TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>;
#[napi]
#[derive(Clone)]
pub struct Tokenizer {
pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>,
}
#[napi]
impl Tokenizer {
#[napi(constructor)]
pub fn new(model: &Model) -> Self {
Self {
tokenizer: Arc::new(RwLock::new(tk::TokenizerImpl::new((*model).clone()))),
}
}
#[napi]
pub fn set_pre_tokenizer(&mut self, pre_tokenizer: &PreTokenizer) {
self
.tokenizer
.write()
.unwrap()
.with_pre_tokenizer(Some((*pre_tokenizer).clone()));
}
#[napi]
pub fn set_decoder(&mut self, decoder: &Decoder) {
self
.tokenizer
.write()
.unwrap()
.with_decoder(Some((*decoder).clone()));
}
#[napi]
pub fn set_model(&mut self, model: &Model) {
self.tokenizer.write().unwrap().with_model((*model).clone());
}
#[napi]
pub fn set_post_processor(&mut self, post_processor: &Processor) {
self
.tokenizer
.write()
.unwrap()
.with_post_processor(Some((*post_processor).clone()));
}
#[napi]
pub fn set_normalizer(&mut self, normalizer: &Normalizer) {
self
.tokenizer
.write()
.unwrap()
.with_normalizer(Some((*normalizer).clone()));
}
#[napi]
pub fn save(&self, path: String, pretty: Option<bool>) -> Result<()> {
let pretty = pretty.unwrap_or(false);
self
.tokenizer
.read()
.unwrap()
.save(path, pretty)
.map_err(|e| Error::from_reason(format!("{}", e)))
}
#[napi]
pub fn add_added_tokens(&mut self, tokens: Vec<&AddedToken>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| (*tok).clone().into())
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi]
pub fn add_tokens(&mut self, tokens: Vec<String>) -> u32 {
let tokens: Vec<_> = tokens
.into_iter()
.map(|tok| tk::AddedToken::from(tok, false))
.collect();
self.tokenizer.write().unwrap().add_tokens(&tokens) as u32
}
#[napi(ts_return_type = "Promise<JsEncoding>")]
pub fn encode(
&self,
#[napi(ts_arg_type = "InputSequence")] sentence: String,
#[napi(ts_arg_type = "InputSequence | null")] pair: Option<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let input: tk::EncodeInput = match pair {
Some(pair) => (sentence, pair).into(),
None => sentence.into(),
};
AsyncTask::new(EncodeTask {
tokenizer: (*self).clone(),
input: Some(input),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<JsEncoding[]>")]
pub fn encode_batch(
&self,
#[napi(ts_arg_type = "EncodeInput[]")] sentences: Vec<String>,
encode_options: Option<EncodeOptions>,
) -> AsyncTask<EncodeBatchTask<'static>> {
let options: EncodeOptionsDef = encode_options.unwrap_or_default().into();
let inputs: Vec<tk::EncodeInput> = sentences
.into_iter()
.map(|sentence| sentence.into())
.collect();
AsyncTask::new(EncodeBatchTask {
tokenizer: (*self).clone(),
inputs: Some(inputs),
add_special_tokens: options.add_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string>")]
pub fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> AsyncTask<DecodeTask> {
AsyncTask::new(DecodeTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(ts_return_type = "Promise<string[]>")]
pub fn decode_batch(
&self,
ids: Vec<Vec<u32>>,
skip_special_tokens: bool,
) -> AsyncTask<DecodeBatchTask> {
AsyncTask::new(DecodeBatchTask {
tokenizer: (*self).clone(),
ids,
skip_special_tokens,
})
}
#[napi(factory)]
pub fn from_string(s: String) -> Result<Self> {
let tokenizer: tk::tokenizer::TokenizerImpl<
Model,
Normalizer,
PreTokenizer,
Processor,
Decoder,
> = s
.parse()
.map_err(|e| Error::from_reason(format!("{}", e)))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi(factory)]
pub fn from_file(file: String) -> Result<Self> {
let tokenizer = tk::tokenizer::TokenizerImpl::from_file(file)
.map_err(|e| Error::from_reason(format!("Error loading from file{}", e)))?;
Ok(Self {
tokenizer: Arc::new(RwLock::new(tokenizer)),
})
}
#[napi]
pub fn add_special_tokens(&mut self, tokens: Vec<String>) {
let tokens: Vec<_> = tokens
.into_iter()
.map(|s| tk::AddedToken::from(s, true))
.collect();
self.tokenizer.write().unwrap().add_special_tokens(&tokens);
}
#[napi]
pub fn set_truncation(
&mut self,
max_length: u32,
options: Option<TruncationOptions>,
) -> Result<()> {
let mut options: tk::TruncationParams = if let Some(options) = options {
options.try_into()?
} else {
Default::default()
};
options.max_length = max_length as usize;
self
.tokenizer
.write()
.unwrap()
.with_truncation(Some(options))
.unwrap();
Ok(())
}
#[napi]
pub fn disable_truncation(&mut self) {
self
.tokenizer
.write()
.unwrap()
.with_truncation(None)
.unwrap();
}
#[napi]
pub fn set_padding(&mut self, options: Option<PaddingOptions>) -> Result<()> {
let options = if let Some(options) = options {
Some(options.try_into()?)
} else {
None
};
self.tokenizer.write().unwrap().with_padding(options);
Ok(())
}
#[napi]
pub fn disable_padding(&mut self) {
self.tokenizer.write().unwrap().with_padding(None);
}
#[napi]
pub fn get_decoder(&self) -> Option<Decoder> {
self.tokenizer.read().unwrap().get_decoder().cloned()
}
#[napi]
pub fn get_normalizer(&self) -> Option<Normalizer> {
self.tokenizer.read().unwrap().get_normalizer().cloned()
}
#[napi]
pub fn get_pre_tokenizer(&self) -> Option<PreTokenizer> {
self.tokenizer.read().unwrap().get_pre_tokenizer().cloned()
}
#[napi]
pub fn get_post_processor(&self) -> Option<Processor> {
self.tokenizer.read().unwrap().get_post_processor().cloned()
}
#[napi]
pub fn get_vocab(&self, with_added_tokens: Option<bool>) -> HashMap<String, u32> {
let with_added_tokens = with_added_tokens.unwrap_or(true);
self.tokenizer.read().unwrap().get_vocab(with_added_tokens)
}
#[napi]
pub fn get_vocab_size(&self, with_added_tokens: Option<bool>) -> u32 {
self.get_vocab(with_added_tokens).len() as u32
}
#[napi]
pub fn id_to_token(&self, id: u32) -> Option<String> {
self.tokenizer.read().unwrap().id_to_token(id)
}
#[napi]
pub fn token_to_id(&self, token: String) -> Option<u32> {
self.tokenizer.read().unwrap().token_to_id(&token)
}
#[napi]
pub fn train(&mut self, files: Vec<String>) -> Result<()> {
let mut trainer: Trainer = self
.tokenizer
.read()
.unwrap()
.get_model()
.model
.as_ref()
.unwrap()
.read()
.unwrap()
.get_trainer()
.into();
self
.tokenizer
.write()
.unwrap()
.train_from_files(&mut trainer, files)
.map_err(|e| Error::from_reason(format!("{}", e)))?;
Ok(())
}
#[napi]
pub fn running_tasks(&self) -> u32 {
std::sync::Arc::strong_count(&self.tokenizer) as u32
}
#[napi]
pub fn post_process(
&self,
encoding: &JsEncoding,
pair: Option<&JsEncoding>,
add_special_tokens: Option<bool>,
) -> Result<JsEncoding> {
let add_special_tokens = add_special_tokens.unwrap_or(true);
Ok(
self
.tokenizer
.read()
.unwrap()
.post_process(
(*encoding).clone().try_into()?,
if let Some(pair) = pair {
Some((*pair).clone().try_into()?)
} else {
None
},
add_special_tokens,
)
.map_err(|e| Error::from_reason(format!("{}", e)))?
.into(),
)
}
}
#[napi(object)]
#[derive(Default)]
pub struct JsFromPretrainedParameters {
pub revision: Option<String>,
pub auth_token: Option<String>,
}
| tokenizers/bindings/node/src/tokenizer.rs/0 | {
"file_path": "tokenizers/bindings/node/src/tokenizer.rs",
"repo_id": "tokenizers",
"token_count": 5713
} | 233 |
import argparse
import logging
import time
from tqdm import tqdm
from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE, WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.processors import BertProcessing
from transformers import BertTokenizer, GPT2Tokenizer
logging.getLogger("transformers").disabled = True
logging.getLogger("transformers.tokenization_utils").disabled = True
parser = argparse.ArgumentParser()
parser.add_argument("--type", default="gpt2", type=str, help="The type of tokenizer (bert|gpt2)")
parser.add_argument("--file", default=None, type=str, help="The file to encode")
parser.add_argument("--vocab", default=None, type=str, required=True, help="The vocab file")
parser.add_argument("--merges", default=None, type=str, help="The merges.txt file")
parser.add_argument("--debug", action="store_true", help="Verbose output")
args = parser.parse_args()
if args.type == "gpt2" and args.merges is None:
raise Exception("Expected merges.txt file")
if args.file is not None:
with open(args.file, "r") as fp:
text = [line.strip() for line in fp]
else:
text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
""".split("\n")
if args.type == "gpt2":
print("Running GPT-2 tokenizer")
tok_p = GPT2Tokenizer.from_pretrained("gpt2")
# Create a Tokenizer using BPE
tok_r = Tokenizer(BPE(args.vocab, args.merges))
# Use ByteLevel PreTokenizer
tok_r.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
# Use ByteLevel Decoder
tok_r.decoder = decoders.ByteLevel()
elif args.type == "bert":
print("Running Bert tokenizer")
tok_p = BertTokenizer.from_pretrained(args.vocab)
tok_r = Tokenizer(WordPiece(args.vocab, unk_token="[UNK]", max_input_chars_per_word=100))
tok_r.normalizer = BertNormalizer(
clean_text=True,
handle_chinese_chars=True,
strip_accents=True,
lowercase=True,
)
# tok_r.pre_tokenizer = pre_tokenizers.Whitespace()
tok_r.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
tok_r.decoder = decoders.WordPiece()
tok_r.post_processor = BertProcessing(
("[SEP]", tok_r.token_to_id("[SEP]")),
("[CLS]", tok_r.token_to_id("[CLS]")),
)
else:
raise Exception(f"Unknown type {args.type}")
def tokenize_r():
return tok_r.encode_batch(text)
def tokenize_p():
return [tok_p.encode(sentence, add_special_tokens=True) for sentence in tqdm(text)]
print(f"Tokenizing {len(text)} lines")
# Rust version
start = time.time()
encoded_r = tokenize_r()
end = time.time()
time_r = end - start
print(f"Rust tokenizer took: {time_r} sec")
# Python version
start = time.time()
encoded_p = tokenize_p()
end = time.time()
time_p = end - start
print(f"Transformer tokenizer took: {time_p} sec")
print(f"SpeedUp Ratio: {time_p / time_r}")
ids_r = [sentence.ids for sentence in encoded_r]
diff_ids = 0
for i in range(0, len(encoded_r)):
if encoded_r[i].ids != encoded_p[i]:
diff_ids += 1
if args.debug:
print(encoded_r[i].ids)
print(encoded_p[i])
print(encoded_r[i].tokens)
print(tok_p.tokenize(text[i]))
print(text[i])
print("")
print(f"Ids differences: {diff_ids}")
decoded_r = tok_r.decode_batch([sentence.ids for sentence in encoded_r], False)
decoded_p = [tok_p.decode(en) for en in encoded_p]
diff_decoded = 0
for i in range(0, len(text)):
if decoded_r[i] != decoded_p[i]:
diff_decoded += 1
if args.debug:
print(f"Original: {text[i]}")
print(f"Rust: {decoded_r[i]}")
print(f"Python: {decoded_p[i]}")
print("")
print(f"Decoding differences: {diff_decoded}")
| tokenizers/bindings/python/examples/example.py/0 | {
"file_path": "tokenizers/bindings/python/examples/example.py",
"repo_id": "tokenizers",
"token_count": 1770
} | 234 |
# Generated content DO NOT EDIT
from .. import models
Model = models.Model
BPE = models.BPE
Unigram = models.Unigram
WordLevel = models.WordLevel
WordPiece = models.WordPiece
| tokenizers/bindings/python/py_src/tokenizers/models/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/models/__init__.py",
"repo_id": "tokenizers",
"token_count": 56
} | 235 |
from argparse import ArgumentParser
from json import dump
from logging import basicConfig, getLogger
from os import linesep, remove
from os.path import exists
from tempfile import NamedTemporaryFile
from typing import Dict, List, Tuple
from requests import get
from sentencepiece import SentencePieceProcessor
from tqdm import trange, tqdm
basicConfig()
logger = getLogger()
class SentencePieceExtractor:
"""
Extractor implementation for SentencePiece trained models.
https://github.com/google/sentencepiece
"""
def __init__(self, model: str):
# Get SentencePiece
self.sp = SentencePieceProcessor()
self.sp.Load(model)
def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
sp = self.sp
vocab = {sp.id_to_piece(index): index for index in trange(sp.GetPieceSize())}
# Merges
merges = []
for piece_l in tqdm(vocab.keys(), total=sp.GetPieceSize()):
for piece_r in vocab.keys():
merge = f"{piece_l}{piece_r}"
piece_id = vocab.get(merge, None)
if piece_id:
merges += [(piece_l, piece_r, piece_id)]
merges = sorted(merges, key=lambda val: val[2])
merges = [(val[0], val[1]) for val in merges]
return vocab, merges
class YouTokenToMeExtractor:
"""
Extractor implementation for YouTokenToMe trained models format.
Model are as follow:
vocab_size nb_merges
piece piece_id
...(repeated vocab_size)
piece_id_left piece_id_right piece_id
...(repeated nb merges)
"""
def __init__(self, model: str):
self._model = model
def extract(self) -> Tuple[Dict[str, int], List[Tuple]]:
with open(self._model, "r") as model_f:
# Retrieve information
nb_pieces, nb_merges = map(int, model_f.readline().split())
vocab, merges = {}, []
# Vocab
for _ in trange(nb_pieces):
piece, piece_id = map(int, model_f.readline().split())
vocab[piece_id] = chr(piece)
# Merges
for _ in trange(nb_merges):
piece_id_l, piece_id_r, piece = map(int, model_f.readline().split())
piece_l, piece_r = vocab[piece_id_l], vocab[piece_id_r]
vocab[piece] = f"{piece_l}{piece_r}"
merges += [(piece_l, piece_r)]
# Special tokens
unk, pad, bos, eos = map(int, model_f.readline().split())
vocab[unk] = "<unk>"
vocab[pad] = "<pad>"
vocab[bos] = "<bos>"
vocab[eos] = "<eos>"
# Invert key and value for vocab
vocab = dict(zip(vocab.values(), vocab.keys()))
return vocab, merges
if __name__ == "__main__":
parser = ArgumentParser("SentencePiece vocab extractor")
parser.add_argument(
"--provider",
type=str,
required=True,
choices=["sentencepiece", "youtokentome"],
help="Indicate the format of the file.",
)
parser.add_argument("--model", type=str, required=True, help="SentencePiece model to extract vocab from.")
parser.add_argument(
"--vocab-output-path",
type=str,
required=True,
help="Path where the vocab.json file will be extracted",
)
parser.add_argument(
"--merges-output-path",
type=str,
required=True,
help="Path where the merges file will be extracted",
)
# Parse cli arguments
args = parser.parse_args()
try:
if args.model.startswith("http"):
# Saving model
with NamedTemporaryFile("wb", delete=False) as f:
logger.info("Writing content from {} to {}".format(args.model, f.name))
response = get(args.model, allow_redirects=True)
f.write(response.content)
args.remote_model = args.model
args.model = f.name
# Allocate extractor
extractor = SentencePieceExtractor if args.provider == "sentencepiece" else YouTokenToMeExtractor
extractor = extractor(args.model)
logger.info(f"Using {type(extractor).__name__}")
# Open output files and let's extract model information
with open(args.vocab_output_path, "w") as vocab_f:
with open(args.merges_output_path, "w") as merges_f:
# Do the extraction
vocab, merges = extractor.extract()
# Save content
dump(vocab, vocab_f)
merges_f.writelines(map(lambda x: f"{x[0]} {x[1]}{linesep}", merges))
finally:
# If model was downloaded from internet we need to cleanup the tmp folder.
if hasattr(args, "remote_model") and exists(args.model):
remove(args.model)
| tokenizers/bindings/python/scripts/sentencepiece_extractor.py/0 | {
"file_path": "tokenizers/bindings/python/scripts/sentencepiece_extractor.py",
"repo_id": "tokenizers",
"token_count": 2231
} | 236 |
use super::regex::PyRegex;
use super::{DestroyPtr, RefMutContainer, RefMutGuard};
use crate::error::ToPyResult;
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::normalizer::{char_to_bytes, NormalizedString, Range, SplitDelimiterBehavior};
use tk::pattern::Pattern;
/// Represents a Pattern as used by `NormalizedString`
#[derive(Clone, FromPyObject)]
pub enum PyPattern {
#[pyo3(annotation = "str")]
Str(String),
#[pyo3(annotation = "tokenizers.Regex")]
Regex(Py<PyRegex>),
// TODO: Add the compatibility for Fn(char) -> bool
}
impl Pattern for PyPattern {
fn find_matches(&self, inside: &str) -> tk::Result<Vec<(tk::Offsets, bool)>> {
match self {
PyPattern::Str(s) => {
let mut chars = s.chars();
if let (Some(c), None) = (chars.next(), chars.next()) {
c.find_matches(inside)
} else {
s.find_matches(inside)
}
}
PyPattern::Regex(r) => {
Python::with_gil(|py| (&r.borrow(py).inner).find_matches(inside))
}
}
}
}
impl From<PyPattern> for tk::normalizers::replace::ReplacePattern {
fn from(pattern: PyPattern) -> Self {
match pattern {
PyPattern::Str(s) => Self::String(s.to_owned()),
PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())),
}
}
}
impl From<PyPattern> for tk::pre_tokenizers::split::SplitPattern {
fn from(pattern: PyPattern) -> Self {
match pattern {
PyPattern::Str(s) => Self::String(s.to_owned()),
PyPattern::Regex(r) => Python::with_gil(|py| Self::Regex(r.borrow(py).pattern.clone())),
}
}
}
#[derive(Debug, Clone, FromPyObject)]
pub enum PyRange<'s> {
#[pyo3(annotation = "int")]
Single(isize),
#[pyo3(annotation = "Tuple[uint, uint]")]
Range(usize, usize),
#[pyo3(annotation = "slice")]
Slice(&'s PySlice),
}
impl PyRange<'_> {
pub fn to_range(&self, max_len: usize) -> PyResult<std::ops::Range<usize>> {
match self {
PyRange::Single(i) => {
if i.is_negative() {
let i = -i as usize;
if i > max_len {
Err(exceptions::PyValueError::new_err(format!(
"{} is bigger than max len",
i
)))
} else {
Ok(max_len - i..max_len - i + 1)
}
} else {
let i = *i as usize;
Ok(i..i + 1)
}
}
PyRange::Range(s, e) => Ok(*s..*e),
PyRange::Slice(s) => {
let r = s.indices(max_len as std::os::raw::c_long)?;
Ok(r.start as usize..r.stop as usize)
}
}
}
}
#[derive(Clone)]
pub struct PySplitDelimiterBehavior(pub SplitDelimiterBehavior);
impl FromPyObject<'_> for PySplitDelimiterBehavior {
fn extract(obj: &PyAny) -> PyResult<Self> {
let s = obj.extract::<&str>()?;
Ok(Self(match s {
"removed" => Ok(SplitDelimiterBehavior::Removed),
"isolated" => Ok(SplitDelimiterBehavior::Isolated),
"merged_with_previous" => Ok(SplitDelimiterBehavior::MergedWithPrevious),
"merged_with_next" => Ok(SplitDelimiterBehavior::MergedWithNext),
"contiguous" => Ok(SplitDelimiterBehavior::Contiguous),
_ => Err(exceptions::PyValueError::new_err(
"Wrong value for SplitDelimiterBehavior, expected one of: \
`removed, isolated, merged_with_previous, merged_with_next, contiguous`",
)),
}?))
}
}
impl From<PySplitDelimiterBehavior> for SplitDelimiterBehavior {
fn from(v: PySplitDelimiterBehavior) -> Self {
v.0
}
}
fn filter(normalized: &mut NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`filter` expect a callable with the signature: `fn(char) -> bool`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.filter(|c| {
func.call1((c.to_string(),))
.expect(err)
.extract()
.expect(err)
});
Ok(())
}
}
fn for_each(normalized: &NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`for_each` expect a callable with the signature: `fn(char)`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.for_each(|c| {
func.call1((c.to_string(),)).expect(err);
});
Ok(())
}
}
fn map(normalized: &mut NormalizedString, func: &Bound<'_, PyAny>) -> PyResult<()> {
let err = "`map` expect a callable with the signature: `fn(char) -> char`";
if !func.is_callable() {
Err(exceptions::PyTypeError::new_err(err))
} else {
normalized.map(|c| {
let c: String = func
.call1((c.to_string(),))
.expect(err)
.extract()
.expect(err);
c.chars().next().expect(err)
});
Ok(())
}
}
fn slice(
normalized: &NormalizedString,
range: &PyRange<'_>,
) -> PyResult<Option<PyNormalizedString>> {
let n_char = normalized.len();
let char_range = range.to_range(n_char)?;
Ok(
char_to_bytes(normalized.get(), char_range).and_then(|bytes_range| {
normalized
.slice(Range::Normalized(bytes_range))
.map(|n| n.into())
}),
)
}
/// NormalizedString
///
/// A NormalizedString takes care of modifying an "original" string, to obtain a "normalized" one.
/// While making all the requested modifications, it keeps track of the alignment information
/// between the two versions of the string.
///
/// Args:
/// sequence: str:
/// The string sequence used to initialize this NormalizedString
#[pyclass(module = "tokenizers", name = "NormalizedString")]
#[derive(Clone)]
pub struct PyNormalizedString {
pub(crate) normalized: NormalizedString,
}
#[pymethods]
impl PyNormalizedString {
#[new]
#[pyo3(text_signature = None)]
fn new(s: &str) -> Self {
NormalizedString::from(s).into()
}
/// The normalized part of the string
#[getter]
fn get_normalized(&self) -> &str {
self.normalized.get()
}
#[getter]
fn get_original(&self) -> &str {
self.normalized.get_original()
}
/// Runs the NFD normalization
#[pyo3(text_signature = "(self)")]
fn nfd(&mut self) {
self.normalized.nfd();
}
/// Runs the NFKD normalization
#[pyo3(text_signature = "(self)")]
fn nfkd(&mut self) {
self.normalized.nfkd();
}
/// Runs the NFC normalization
#[pyo3(text_signature = "(self)")]
fn nfc(&mut self) {
self.normalized.nfc();
}
/// Runs the NFKC normalization
#[pyo3(text_signature = "(self)")]
fn nfkc(&mut self) {
self.normalized.nfkc();
}
/// Lowercase the string
#[pyo3(text_signature = "(self)")]
fn lowercase(&mut self) {
self.normalized.lowercase();
}
/// Uppercase the string
#[pyo3(text_signature = "(self)")]
fn uppercase(&mut self) {
self.normalized.uppercase();
}
/// Prepend the given sequence to the string
#[pyo3(text_signature = "(self, s)")]
fn prepend(&mut self, s: &str) {
self.normalized.prepend(s);
}
/// Append the given sequence to the string
#[pyo3(text_signature = "(self, s)")]
fn append(&mut self, s: &str) {
self.normalized.append(s);
}
/// Strip the left of the string
#[pyo3(text_signature = "(self)")]
fn lstrip(&mut self) {
self.normalized.lstrip();
}
/// Strip the right of the string
#[pyo3(text_signature = "(self)")]
fn rstrip(&mut self) {
self.normalized.rstrip();
}
/// Strip both ends of the string
#[pyo3(text_signature = "(self)")]
fn strip(&mut self) {
self.normalized.strip();
}
/// Clears the string
#[pyo3(text_signature = "(self)")]
fn clear(&mut self) {
self.normalized.clear();
}
/// Slice the string using the given range
#[pyo3(text_signature = "(self, range)")]
fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> {
slice(&self.normalized, &range)
}
/// Filter each character of the string using the given func
#[pyo3(text_signature = "(self, func)")]
fn filter(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
filter(&mut self.normalized, func)
}
/// Calls the given function for each character of the string
#[pyo3(text_signature = "(self, func)")]
fn for_each(&self, func: &Bound<'_, PyAny>) -> PyResult<()> {
for_each(&self.normalized, func)
}
/// Calls the given function for each character of the string
///
/// Replaces each character of the string using the returned value. Each
/// returned value **must** be a str of length 1 (ie a character).
#[pyo3(text_signature = "(self, func)")]
fn map(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
map(&mut self.normalized, func)
}
/// Split the NormalizedString using the given pattern and the specified behavior
///
/// Args:
/// pattern: Pattern:
/// A pattern used to split the string. Usually a string or a regex built with `tokenizers.Regex`
///
/// behavior: SplitDelimiterBehavior:
/// The behavior to use when splitting.
/// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next",
/// "contiguous"
///
/// Returns:
/// A list of NormalizedString, representing each split
#[pyo3(text_signature = "(self, pattern, behavior)")]
fn split(
&mut self,
pattern: PyPattern,
behavior: PySplitDelimiterBehavior,
) -> PyResult<Vec<PyNormalizedString>> {
Ok(ToPyResult(self.normalized.split(pattern, behavior.into()))
.into_py()?
.into_iter()
.map(|n| n.into())
.collect())
}
/// Replace the content of the given pattern with the provided content
///
/// Args:
/// pattern: Pattern:
/// A pattern used to match the string. Usually a string or a Regex
///
/// content: str:
/// The content to be used as replacement
#[pyo3(text_signature = "(self, pattern, content)")]
fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> {
ToPyResult(self.normalized.replace(pattern, content)).into()
}
fn __repr__(&self) -> String {
format!(
r#"NormalizedString(original="{}", normalized="{}")"#,
self.normalized.get_original(),
self.normalized.get()
)
}
fn __str__(&self) -> &str {
self.normalized.get()
}
fn __getitem__(&self, range: PyRange<'_>) -> PyResult<Option<PyNormalizedString>> {
slice(&self.normalized, &range)
}
}
impl From<NormalizedString> for PyNormalizedString {
fn from(normalized: NormalizedString) -> Self {
Self { normalized }
}
}
impl From<PyNormalizedString> for NormalizedString {
fn from(normalized: PyNormalizedString) -> Self {
normalized.normalized
}
}
#[pyclass(module = "tokenizers", name = "NormalizedStringRefMut")]
#[derive(Clone)]
pub struct PyNormalizedStringRefMut {
inner: RefMutContainer<NormalizedString>,
}
impl DestroyPtr for PyNormalizedStringRefMut {
fn destroy(&mut self) {
self.inner.destroy();
}
}
impl PyNormalizedStringRefMut {
pub fn new(normalized: &mut NormalizedString) -> RefMutGuard<Self> {
RefMutGuard::new(Self {
inner: RefMutContainer::new(normalized),
})
}
pub fn destroyed_error() -> PyErr {
exceptions::PyException::new_err("Cannot use a NormalizedStringRefMut outside `normalize`")
}
/// Provides a way to access a reference to the underlying NormalizedString
pub fn map_as_ref<F: FnOnce(&NormalizedString) -> U, U>(&self, f: F) -> PyResult<U> {
self.inner
.map(f)
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
/// Provides a way to access a mutable reference to the underlying NormalizedString
pub fn map_as_mut<F: FnOnce(&mut NormalizedString) -> U, U>(&mut self, f: F) -> PyResult<U> {
self.inner
.map_mut(f)
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
}
#[pymethods]
impl PyNormalizedStringRefMut {
#[getter]
fn get_normalized(&self) -> PyResult<String> {
self.inner
.map(|n| n.get().to_owned())
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
#[getter]
fn get_original(&self) -> PyResult<String> {
self.inner
.map(|n| n.get_original().to_owned())
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)
}
fn nfd(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfd();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfkd(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfkd();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfc(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfc();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn nfkc(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.nfkc();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn lowercase(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.lowercase();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn uppercase(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.uppercase();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn prepend(&mut self, s: &str) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.prepend(s);
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn append(&mut self, s: &str) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.append(s);
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn lstrip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.lstrip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn rstrip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.rstrip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn strip(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.strip();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn clear(&mut self) -> PyResult<()> {
self.inner
.map_mut(|n| {
n.clear();
})
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?;
Ok(())
}
fn slice(&self, range: PyRange) -> PyResult<Option<PyNormalizedString>> {
self.inner
.map(|n| slice(n, &range))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?
}
fn filter(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map_mut(|n| filter(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn for_each(&self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map(|n| for_each(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn map(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> {
self.inner
.map_mut(|n| map(n, func))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)??;
Ok(())
}
fn split(
&mut self,
pattern: PyPattern,
behavior: PySplitDelimiterBehavior,
) -> PyResult<Vec<PyNormalizedString>> {
Ok(ToPyResult(
self.inner
.map_mut(|n| n.split(pattern, behavior.into()))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?,
)
.into_py()?
.into_iter()
.map(|n| n.into())
.collect())
}
fn replace(&mut self, pattern: PyPattern, content: &str) -> PyResult<()> {
ToPyResult(
self.inner
.map_mut(|n| n.replace(pattern, content))
.ok_or_else(PyNormalizedStringRefMut::destroyed_error)?,
)
.into()
}
}
| tokenizers/bindings/python/src/utils/normalization.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/normalization.rs",
"repo_id": "tokenizers",
"token_count": 8490
} | 237 |
# Decoders
<tokenizerslangcontent>
<python>
## BPEDecoder
[[autodoc]] tokenizers.decoders.BPEDecoder
## ByteLevel
[[autodoc]] tokenizers.decoders.ByteLevel
## CTC
[[autodoc]] tokenizers.decoders.CTC
## Metaspace
[[autodoc]] tokenizers.decoders.Metaspace
## WordPiece
[[autodoc]] tokenizers.decoders.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/decoders.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/decoders.mdx",
"repo_id": "tokenizers",
"token_count": 197
} | 238 |
# Training from memory
In the [Quicktour](quicktour), we saw how to build and train a
tokenizer using text files, but we can actually use any Python Iterator.
In this section we'll see a few different ways of training our
tokenizer.
For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and
[`~tokenizers.trainers.Trainer`], built as
following:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START init_tokenizer_trainer",
"end-before": "END init_tokenizer_trainer",
"dedent": 8}
</literalinclude>
This tokenizer is based on the [`~tokenizers.models.Unigram`] model. It
takes care of normalizing the input using the NFKC Unicode normalization
method, and uses a [`~tokenizers.pre_tokenizers.ByteLevel`] pre-tokenizer with the corresponding decoder.
For more information on the components used here, you can check
[here](components).
## The most basic way
As you probably guessed already, the easiest way to train our tokenizer
is by using a `List`{.interpreted-text role="obj"}:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_basic",
"end-before": "END train_basic",
"dedent": 8}
</literalinclude>
Easy, right? You can use anything working as an iterator here, be it a
`List`{.interpreted-text role="obj"}, `Tuple`{.interpreted-text
role="obj"}, or a `np.Array`{.interpreted-text role="obj"}. Anything
works as long as it provides strings.
## Using the ð€ Datasets library
An awesome way to access one of the many datasets that exist out there
is by using the ð€ Datasets library. For more information about it, you
should check [the official documentation
here](https://huggingface.co/docs/datasets/).
Let's start by loading our dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START load_dataset",
"end-before": "END load_dataset",
"dedent": 8}
</literalinclude>
The next step is to build an iterator over this dataset. The easiest way
to do this is probably by using a generator:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START def_batch_iterator",
"end-before": "END def_batch_iterator",
"dedent": 8}
</literalinclude>
As you can see here, for improved efficiency we can actually provide a
batch of examples used to train, instead of iterating over them one by
one. By doing so, we can expect performances very similar to those we
got while training directly from files.
With our iterator ready, we just need to launch the training. In order
to improve the look of our progress bars, we can specify the total
length of the dataset:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START train_datasets",
"end-before": "END train_datasets",
"dedent": 8}
</literalinclude>
And that's it!
## Using gzip files
Since gzip files in Python can be used as iterators, it is extremely
simple to train on such files:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START single_gzip",
"end-before": "END single_gzip",
"dedent": 8}
</literalinclude>
Now if we wanted to train from multiple gzip files, it wouldn't be much
harder:
<literalinclude>
{"path": "../../bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"language": "python",
"start-after": "START multi_gzip",
"end-before": "END multi_gzip",
"dedent": 8}
</literalinclude>
And voilà !
| tokenizers/docs/source-doc-builder/training_from_memory.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/training_from_memory.mdx",
"repo_id": "tokenizers",
"token_count": 1199
} | 239 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("./_ext"))
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "tokenizers"
copyright = "2020, huggingface"
author = "huggingface"
# The full version, including alpha/beta/rc tags
release = ""
# -- Custom information ------------------------------------------------------
# The possible values for languages (used by `_ext/entities`)
languages = ["node", "rust", "python"]
# This defines the version used to generate links to docs.rs
rust_version = "latest"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "entities", "rust_doc", "toctree_tags"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"analytics_id": "UA-83738774-2"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
def setup(app):
for language in languages:
if not tags.has(language):
exclude_patterns.append(f"tutorials/{language}/*")
app.add_css_file("css/huggingface.css")
app.add_css_file("css/code-snippets.css")
app.add_js_file("js/custom.js")
| tokenizers/docs/source/conf.py/0 | {
"file_path": "tokenizers/docs/source/conf.py",
"repo_id": "tokenizers",
"token_count": 781
} | 240 |
#[macro_use]
extern crate criterion;
mod common;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use criterion::Criterion;
use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainerBuilder};
use tokenizers::normalizers::{BertNormalizer, NormalizerWrapper};
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::processors::bert::BertProcessing;
use tokenizers::{decoders, EncodeInput, Model, TokenizerImpl};
use common::{iter_bench_encode, iter_bench_encode_batch, iter_bench_train};
use tokenizers::decoders::DecoderWrapper;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::processors::PostProcessorWrapper;
static BATCH_SIZE: usize = 1_000;
type BertTokenizer = TokenizerImpl<
WordPiece,
BertNormalizer,
BertPreTokenizer,
BertProcessing,
decoders::wordpiece::WordPiece,
>;
/// Resembling the BertTokenizer implementation from the Python bindings.
fn create_bert_tokenizer(wp: WordPiece) -> BertTokenizer {
let sep_id = *wp.get_vocab().get("[SEP]").unwrap();
let cls_id = *wp.get_vocab().get("[CLS]").unwrap();
let mut tokenizer = TokenizerImpl::new(wp);
tokenizer.with_pre_tokenizer(Some(BertPreTokenizer));
tokenizer.with_normalizer(Some(BertNormalizer::default()));
tokenizer.with_decoder(Some(decoders::wordpiece::WordPiece::default()));
tokenizer.with_post_processor(Some(BertProcessing::new(
("[SEP]".to_string(), sep_id),
("[CLS]".to_string(), cls_id),
)));
tokenizer
}
pub fn bench_bert(c: &mut Criterion) {
let wp = WordPiece::from_file("data/bert-base-uncased-vocab.txt")
.build()
.unwrap();
let tokenizer = create_bert_tokenizer(wp);
let mut lines: Vec<EncodeInput> = vec![];
let mut batches: Vec<Vec<EncodeInput>> = vec![vec![]];
for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
let line: EncodeInput = line.unwrap().into();
lines.push(line.clone());
if batches.last().unwrap().len() >= BATCH_SIZE {
batches.push(vec![]);
}
batches.last_mut().unwrap().push(line);
}
c.bench_function("WordPiece BERT encode", |b| {
b.iter_custom(|iters| iter_bench_encode(iters, &tokenizer, &lines))
});
c.bench_function("WordPiece BERT encode batch", |b| {
b.iter_custom(|iters| iter_bench_encode_batch(iters, &tokenizer, &batches))
});
}
fn bench_train(c: &mut Criterion) {
let mut trainer = WordPieceTrainerBuilder::default()
.show_progress(false)
.build();
type Tok = TokenizerImpl<
WordPiece,
NormalizerWrapper,
Whitespace,
PostProcessorWrapper,
DecoderWrapper,
>;
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Some(Whitespace {}));
c.bench_function("WordPiece Train vocabulary (small)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/small.txt".to_string()],
)
})
});
let mut tokenizer = Tok::new(WordPiece::default());
tokenizer.with_pre_tokenizer(Some(Whitespace {}));
c.bench_function("WordPiece Train vocabulary (big)", |b| {
b.iter_custom(|iters| {
iter_bench_train(
iters,
&mut tokenizer,
&mut trainer,
vec!["data/big.txt".to_string()],
)
})
});
}
criterion_group! {
name = bert_benches;
config = Criterion::default().sample_size(20);
targets = bench_bert
}
criterion_group! {
name = benches_train;
config = Criterion::default().sample_size(10);
targets = bench_train
}
criterion_main!(bert_benches, benches_train);
| tokenizers/tokenizers/benches/bert_benchmark.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/bert_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 1657
} | 241 |
language: node_js
node_js: "10"
script:
- ./node_modules/.bin/webpack
| tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml",
"repo_id": "tokenizers",
"token_count": 30
} | 242 |
use crate::decoders::DecoderWrapper;
use crate::tokenizer::{Decoder, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
decoders: Vec<DecoderWrapper>,
}
impl Sequence {
pub fn new(decoders: Vec<DecoderWrapper>) -> Self {
Self { decoders }
}
pub fn get_decoders(&self) -> &[DecoderWrapper] {
&self.decoders
}
pub fn get_decoders_mut(&mut self) -> &mut [DecoderWrapper] {
&mut self.decoders
}
}
impl Decoder for Sequence {
fn decode_chain(&self, mut tokens: Vec<String>) -> Result<Vec<String>> {
for decoder in &self.decoders {
tokens = decoder.decode_chain(tokens)?;
}
Ok(tokens)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::decoders::ctc::CTC;
use crate::pre_tokenizers::metaspace::Metaspace;
#[test]
fn sequence_basic() {
let decoders = vec![
DecoderWrapper::CTC(CTC::default()),
DecoderWrapper::Metaspace(Metaspace::default()),
];
let decoder = Sequence::new(decoders);
let tokens: Vec<String> = vec!["â", "â", "H", "H", "i", "i", "â", "y", "o", "u"]
.into_iter()
.map(|s| s.to_string())
.collect();
let out_tokens = decoder.decode(tokens).unwrap();
assert_eq!(out_tokens, "Hi you");
}
}
| tokenizers/tokenizers/src/decoders/sequence.rs/0 | {
"file_path": "tokenizers/tokenizers/src/decoders/sequence.rs",
"repo_id": "tokenizers",
"token_count": 689
} | 243 |
use super::OrderedVocabIter;
use crate::tokenizer::{Model, Result, Token};
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
mod serialization;
mod trainer;
// Re-export
pub use trainer::*;
type Vocab = HashMap<String, u32>;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("WordLevel error: Missing [UNK] token from the vocabulary")]
MissingUnkToken,
#[error("Bad vocabulary json file")]
BadVocabulary,
}
struct Config {
files: Option<String>,
vocab: HashMap<String, u32>,
unk_token: String,
}
/// A `WordLevelBuilder` can be used to create a `WordLevel`
/// model with a custom configuration.
pub struct WordLevelBuilder {
config: Config,
}
impl Default for WordLevelBuilder {
fn default() -> Self {
Self {
config: Config {
files: None,
vocab: HashMap::new(),
unk_token: String::from("<unk>"),
},
}
}
}
impl WordLevelBuilder {
/// Construct a new `WordLevelBuilder`.
pub fn new() -> Self {
Self::default()
}
/// Set the input files.
#[must_use]
pub fn files(mut self, vocab: String) -> Self {
self.config.files = Some(vocab);
self
}
/// Set the vocab (token -> ID) mapping.
#[must_use]
pub fn vocab(mut self, vocab: HashMap<String, u32>) -> Self {
self.config.vocab = vocab;
self
}
/// The the `UNK` token for the vocab.
#[must_use]
pub fn unk_token(mut self, unk_token: String) -> Self {
self.config.unk_token = unk_token;
self
}
/// Contructs a `WordLevel` model that uses the `WordLevelBuilder`'s configuration.
pub fn build(mut self) -> Result<WordLevel> {
if let Some(vocab) = self.config.files {
self.config.vocab = WordLevel::read_file(&vocab)?;
}
let vocab_r = self
.config
.vocab
.iter()
.map(|(key, val)| (*val, key.to_owned()))
.collect();
Ok(WordLevel {
vocab: self.config.vocab,
vocab_r,
unk_token: self.config.unk_token,
})
}
}
#[derive(PartialEq, Clone, Eq)]
pub struct WordLevel {
vocab: HashMap<String, u32>,
vocab_r: HashMap<u32, String>,
pub unk_token: String,
}
impl std::fmt::Debug for WordLevel {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("WordLevel")
.field("unk_token", &self.unk_token)
.field("vocab", &self.vocab.len())
.finish()
}
}
impl WordLevel {
pub fn builder() -> WordLevelBuilder {
WordLevelBuilder::new()
}
pub fn read_file(vocab_path: &str) -> Result<Vocab> {
let vocab_file = File::open(vocab_path)?;
let mut vocab_file = BufReader::new(vocab_file);
let mut buffer = String::new();
let mut vocab = HashMap::new();
vocab_file.read_to_string(&mut buffer)?;
let json: Value = serde_json::from_str(&buffer)?;
match json {
Value::Object(m) => {
for (token, id) in m {
if let Value::Number(id) = id {
let id = id.as_u64().ok_or(Error::BadVocabulary)? as u32;
vocab.insert(token, id);
}
}
}
_ => return Err(Box::new(Error::BadVocabulary)),
};
Ok(vocab)
}
/// Initialize a WordLevel model from vocab and merges file.
pub fn from_file(vocab_path: &str, unk_token: String) -> Result<WordLevel> {
let vocab = WordLevel::read_file(vocab_path)?;
Self::builder().vocab(vocab).unk_token(unk_token).build()
}
}
impl Default for WordLevel {
fn default() -> Self {
Self {
vocab: HashMap::new(),
vocab_r: HashMap::new(),
unk_token: String::from("<unk>"),
}
}
}
impl Model for WordLevel {
type Trainer = WordLevelTrainer;
fn tokenize(&self, token: &str) -> Result<Vec<Token>> {
if let Some(&id) = self.vocab.get(token) {
Ok(vec![Token {
id,
value: token.to_owned(),
offsets: (0, token.len()),
}])
} else if let Some(&unk_id) = self.vocab.get(&self.unk_token) {
Ok(vec![Token {
id: unk_id,
value: self.unk_token.to_owned(),
offsets: (0, token.len()),
}])
} else {
Err(Box::new(Error::MissingUnkToken))
}
}
fn token_to_id(&self, token: &str) -> Option<u32> {
self.vocab.get(token).copied()
}
fn id_to_token(&self, id: u32) -> Option<String> {
self.vocab_r.get(&id).cloned()
}
fn get_vocab(&self) -> HashMap<String, u32> {
self.vocab.clone()
}
fn get_vocab_size(&self) -> usize {
self.vocab.keys().len()
}
fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> {
let vocab_file_name = match name {
Some(name) => format!("{}-vocab.json", name),
None => "vocab.json".to_string(),
};
// Write vocab.json
let vocab_path: PathBuf = [folder, Path::new(vocab_file_name.as_str())]
.iter()
.collect();
let mut vocab_file = File::create(&vocab_path)?;
let order_vocab_iter = OrderedVocabIter::new(&self.vocab_r);
let serialized = serde_json::to_string(&order_vocab_iter)?;
vocab_file.write_all(serialized.as_bytes())?;
Ok(vec![vocab_path])
}
fn get_trainer(&self) -> Self::Trainer {
WordLevelTrainer::default()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_tokenize_unk() {
let vocab: Vocab = [("<unk>".into(), 0), ("a".into(), 1), ("b".into(), 2)]
.iter()
.cloned()
.collect();
let wordlevel = WordLevelBuilder::default()
.vocab(vocab)
.unk_token("<unk>".to_string())
.build()
.unwrap();
let tokens = wordlevel.tokenize("c").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "<unk>".into(), (0, 1)),]);
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(1u32, "a".into(), (0, 1)),]);
}
#[test]
fn test_tokenize_missing_unk_token() {
let vocab: Vocab = [("a".into(), 0), ("b".into(), 1)].iter().cloned().collect();
let wordlevel = WordLevelBuilder::default().vocab(vocab).build().unwrap();
let tokens = wordlevel.tokenize("a").unwrap();
assert_eq!(tokens, vec![Token::new(0u32, "a".into(), (0, 1)),]);
let error = wordlevel.tokenize("c").err().unwrap();
assert!(error.is::<Error>());
}
}
| tokenizers/tokenizers/src/models/wordlevel/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordlevel/mod.rs",
"repo_id": "tokenizers",
"token_count": 3383
} | 244 |
use std::collections::{HashMap, HashSet};
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
use crate::tokenizer::{
Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result,
SplitDelimiterBehavior,
};
use crate::utils::macro_rules_attribute;
/// Converts bytes to unicode characters.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L9
pub(crate) fn bytes_char() -> HashMap<u8, char> {
let mut bs: Vec<u8> = vec![];
bs.extend(b'!'..=b'~');
bs.extend(b'\xA1'..=b'\xAC');
bs.extend(b'\xAE'..=b'\xFF');
let mut cs: Vec<u32> = bs.iter().map(|i| *i as u32).collect();
let mut n = 0;
for b in 0..=255u8 {
if !bs.contains(&b) {
bs.push(b);
cs.push(u32::pow(2, 8) + n);
n += 1;
}
}
bs.into_iter()
.zip(cs)
.map(|(f, t)| (f, unsafe { std::char::from_u32_unchecked(t) }))
.collect()
}
lazy_static! {
/// Regex that matches exactly one token.
/// See https://github.com/openai/gpt-2/blob/master/src/encoder.py#L98
static ref RE: SysRegex = SysRegex::new(
r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"
)
.unwrap();
static ref BYTES_CHAR: HashMap<u8, char> = bytes_char();
static ref CHAR_BYTES: HashMap<char, u8> =
bytes_char().into_iter().map(|(c, b)| (b, c)).collect();
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Provides all the necessary steps to handle the BPE tokenization at the byte-level. Takes care
/// of all the required processing steps to transform a UTF-8 string as needed before and after the
/// BPE model does its job.
#[macro_rules_attribute(impl_serde_type!)]
#[non_exhaustive]
pub struct ByteLevel {
/// Whether to add a leading space to the first word. This allows to treat the leading word
/// just as any other word.
pub add_prefix_space: bool,
/// Whether the post processing step should trim offsets to avoid including whitespaces.
pub trim_offsets: bool,
/// Whether to use the standard GPT2 regex for whitespace splitting
/// Set it to False if you want to use your own splitting.
#[serde(default = "default_true")]
pub use_regex: bool,
}
fn default_true() -> bool {
true
}
impl Default for ByteLevel {
fn default() -> Self {
Self {
add_prefix_space: true,
trim_offsets: true,
use_regex: true,
}
}
}
impl ByteLevel {
pub fn new(add_prefix_space: bool, trim_offsets: bool, use_regex: bool) -> Self {
Self {
add_prefix_space,
trim_offsets,
use_regex,
}
}
pub fn alphabet() -> HashSet<char> {
BYTES_CHAR.values().copied().collect()
}
#[must_use]
pub fn add_prefix_space(mut self, v: bool) -> Self {
self.add_prefix_space = v;
self
}
#[must_use]
pub fn trim_offsets(mut self, v: bool) -> Self {
self.trim_offsets = v;
self
}
#[must_use]
pub fn use_regex(mut self, v: bool) -> Self {
self.use_regex = v;
self
}
}
/// As a `PreTokenizer`, `ByteLevel` is in charge of transforming all the unicode characters into
/// their byte-level counterpart. It also splits the input according to the configured regex.
// TODO: Give the ability to modify this regex
impl PreTokenizer for ByteLevel {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
let re_ref: &SysRegex = &RE;
pretokenized.split(|_, mut normalized| {
if self.add_prefix_space && !normalized.get().starts_with(' ') {
normalized.prepend(" ");
}
if self.use_regex {
normalized.split(re_ref, SplitDelimiterBehavior::Isolated)
} else {
Ok(vec![normalized])
}
})?;
pretokenized.normalize(|normalized| {
let s = normalized.get();
let mut transformations: Vec<(char, isize)> = Vec::with_capacity(s.len());
let mut i = 0;
for cur_char in s.chars() {
let size = cur_char.len_utf8();
let bytes = s[i..i + size].as_bytes();
i += size;
transformations.extend(
bytes
.iter()
.enumerate()
.map(|(i, b)| (BYTES_CHAR[b], isize::from(i > 0))),
);
}
normalized.transform(transformations, 0);
Ok(())
})
}
}
/// As a `Decoder`, `ByteLevel` is in charge of converting any byte-level characters to their
/// unicode counterpart, before merging everything back into a single String.
/// This decoder will consume the tokens and merge them in one step to alleviate
/// the fact that single token decoded might be a byte not representable as
/// as String.
impl Decoder for ByteLevel {
fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> {
let toks = tokens
.into_iter()
.flat_map(|t| {
t.chars()
.try_fold(vec![], |mut acc, c| {
CHAR_BYTES.get(&c).map(|b| {
acc.push(*b);
acc
})
})
.unwrap_or_else(|| t.as_bytes().to_vec())
})
.collect::<Vec<u8>>();
Ok(vec![String::from_utf8_lossy(&toks).to_string()])
}
}
/// As a `PostProcessor`, `ByteLevel` is in charge of trimming the offsets if necessary.
impl PostProcessor for ByteLevel {
fn added_tokens(&self, _is_pair: bool) -> usize {
0
}
fn process_encodings(
&self,
mut encodings: Vec<Encoding>,
_add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
if self.trim_offsets {
for encoding in encodings.iter_mut() {
process_offsets(encoding, self.add_prefix_space);
encoding
.get_overflowing_mut()
.iter_mut()
.for_each(|encoding| process_offsets(encoding, self.add_prefix_space));
}
}
for (i, encoding) in encodings.iter_mut().enumerate() {
encoding.set_sequence_id(i);
}
Ok(encodings)
//<dyn PostProcessor>::default_process(encodings, add_special_tokens)
}
}
pub fn process_offsets(encoding: &mut Encoding, add_prefix_space: bool) {
encoding.process_tokens_with_offsets_mut(|(i, (token, offsets))| {
let mut leading_spaces = token
.chars()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
let trailing_spaces = token
.chars()
.rev()
.take_while(|c| *c == BYTES_CHAR[&b' '] || c.is_whitespace())
.count();
if leading_spaces > 0 || trailing_spaces > 0 {
if leading_spaces > 0 {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let is_first = i == 0 || offsets.0 == 0;
if is_first && add_prefix_space && leading_spaces == 1 {
// If we are processing the first pair of offsets, with `add_prefix_space`,
// then we shouldn't remove anything we added. If there are more than one
// leading spaces though, it means we didn't add them, and they should be
// removed.
leading_spaces = 0;
}
offsets.0 = std::cmp::min(offsets.0 + leading_spaces, offsets.1);
}
if trailing_spaces > 0 && offsets.1 >= trailing_spaces {
offsets.1 = std::cmp::max(offsets.1 - trailing_spaces, offsets.0);
}
}
});
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tokenizer::{
Decoder, Encoding, OffsetReferential, OffsetType, PostProcessor, PreTokenizedString,
PreTokenizer,
};
use std::iter::FromIterator;
#[test]
fn pre_tokenization() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ä my", (5, 8)),
("Ä friend", (8, 15)),
(",", (15, 16)),
("Ä how", (16, 20)),
("Ä is", (20, 23)),
("Ä your", (23, 28)),
("Ä day", (28, 32)),
("Ä going", (32, 38)),
("?", (38, 39))
]
);
}
#[test]
fn pre_tokenization_no_regex() {
let bytelevel = ByteLevel::default().use_regex(false);
let mut pretokenized: PreTokenizedString = "Hello my friend, how is your day going?".into();
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("Ä HelloÄ myÄ friend,Ä howÄ isÄ yourÄ dayÄ going?", (0, 39))]
);
}
#[test]
fn decoding() {
let bytelevel = ByteLevel::default().add_prefix_space(false);
assert_eq!(
bytelevel
.decode_chain(
vec![
"Hello", "Ä my", "Ä friend", ",", "Ä how", "Ä is", "Ä your", "Ä day", "Ä going",
"?"
]
.into_iter()
.map(|s| s.into())
.collect::<Vec<String>>()
)
.unwrap(),
vec!["Hello my friend, how is your day going?"]
);
}
#[test]
fn add_prefix_space() {
let bytelevel = ByteLevel::default().add_prefix_space(true);
for s in &[
" Hello my friend, how is your day going?",
"Hello my friend, how is your day going?",
] {
let mut pretokenized = PreTokenizedString::from(*s);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Ä Hello", (0, 7)),
("Ä my", (7, 11)),
("Ä friend", (11, 19)),
(",", (19, 20)),
("Ä how", (20, 25)),
("Ä is", (25, 29)),
("Ä your", (29, 35)),
("Ä day", (35, 40)),
("Ä going", (40, 47)),
("?", (47, 48))
]
);
}
}
#[test]
fn decode_works_on_separated_tokens() {
let samples = vec![
"A Nuskhuri abbreviation of ááá¡á£ á¥á áá¡á¢á ( iesu kriste ) \" Jesus Christ \"",
"An equal number have descenders , like p or q in English \
: á , á , á , á , á , á , á , ᢠ, ᣠ, ဠ, አ, ᧠, áª",
];
let bytelevel = ByteLevel::default().add_prefix_space(false);
for sample in samples {
let mut pretokenized = PreTokenizedString::from(sample);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
let separated_tokens = pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.iter()
.flat_map(|(s, _, _)| s.split("").map(|t| t.into()))
.collect::<Vec<_>>();
assert_eq!(
sample,
bytelevel.decode_chain(separated_tokens).unwrap().join("")
);
}
}
#[test]
fn handling_of_newlines() {
let mut pretokenized = PreTokenizedString::from("Hello there\nHello there");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ä there", (5, 11)),
("Ä", (11, 12)),
("Hello", (12, 17)),
("Ä there", (17, 23))
]
);
}
#[test]
fn handling_of_multiple_whitespaces() {
let mut pretokenized = PreTokenizedString::from("Hello there dear");
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hello", (0, 5)),
("Ä there", (5, 11)),
("Ä Ä Ä Ä Ä Ä ", (11, 17)),
("Ä dear", (17, 22))
]
);
}
#[test]
fn offsets_when_char_split_up() {
let input = "iâ¢j";
let mut pretokenized = PreTokenizedString::from(input);
let bytelevel = ByteLevel::default().add_prefix_space(false);
bytelevel.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âÅ¢", (1, 4)), ("j", (4, 5))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Normalized, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![("i", (0, 1)), ("âÅ¢", (1, 7)), ("j", (7, 8))]
);
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(_, o, _)| &input[o.0..o.1])
.collect::<Vec<_>>(),
vec!["i", "â¢", "j"]
);
}
#[test]
fn processor_trims_offsets_pre_tokenized() {
// If user uses `is_pretokenized=True` we might have
// offsets that might begin at the start of the string but are
// NOT the first token.
let mut encoding = Encoding::new(
vec![0; 5],
vec![],
vec!["Ä l".into(), "ove".into(), "Ä l".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
);
process_offsets(&mut encoding, true);
assert_eq!(
encoding,
Encoding::new(
vec![0; 5],
vec![],
vec!["Ä l".into(), "ove".into(), "Ä l".into(), "ove".into()],
vec![],
vec![(0, 1), (1, 4), (0, 1), (1, 4)],
vec![],
vec![],
vec![],
HashMap::new(),
)
);
}
#[test]
fn processor_trims_offsets() {
let start = Encoding::new(
vec![0; 5],
vec![],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![(0, 1), (0, 11), (11, 18), (18, 25), (25, 29)],
vec![],
vec![],
vec![],
HashMap::new(),
);
let expected = Encoding::new(
vec![0; 5],
vec![0; 5],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![(0, 0), (4, 9), (13, 18), (18, 23), (29, 29)],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5)]),
);
let bytelevel = ByteLevel::default().trim_offsets(true);
assert_eq!(
expected,
bytelevel.process(start.clone(), None, false).unwrap()
);
let pair_expected = Encoding::new(
vec![0; 10],
vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
vec![
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
"Ä ".into(),
"Ä Ä Ä Ä HelloÄ Ä ".into(),
"Ä Ä Hello".into(),
"HelloÄ Ä ".into(),
"Ä Ä Ä Ä ".into(),
],
vec![],
vec![
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
(0, 0),
(4, 9),
(13, 18),
(18, 23),
(29, 29),
],
vec![],
vec![],
vec![],
HashMap::from_iter(vec![(0, 0..5), (1, 5..10)]),
);
assert_eq!(
pair_expected,
bytelevel
.process(start.clone(), Some(start), false)
.unwrap()
);
}
#[test]
fn decode_unknown_characters() {
let byte_level = ByteLevel::default();
assert_eq!(
byte_level
.decode_chain(vec![
"Hello".into(),
"Ä there".into(),
"Ä dear".into(),
"Ä friend!".into(),
"Ä ".into(),
"[PA D]".into()
])
.unwrap(),
vec!["Hello there dear friend! [PA D]"]
);
}
#[test]
fn deserialization() {
// Before use_regex
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false}"#,
)
.unwrap();
assert!(byte_level.use_regex);
// Loading works, new future BC test.
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": true}"#,
)
.unwrap();
assert!(byte_level.use_regex);
let byte_level: ByteLevel = serde_json::from_str(
r#"{"type": "ByteLevel", "add_prefix_space": true, "trim_offsets": false, "use_regex": false}"#,
)
.unwrap();
assert!(!byte_level.use_regex);
}
}
| tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs",
"repo_id": "tokenizers",
"token_count": 10935
} | 245 |
//! # Template Processing
//!
//! Provides a way to specify templates in order to add the special tokens to each
//! input sequence as relevant.
//!
//! ## Example
//!
//! Let's take `BERT` tokenizer as an example. It uses two special tokens, used to
//! delimitate each sequence. `[CLS]` is always used at the beginning of the first
//! sequence, and `[SEP]` is added at the end of both the first, and the pair
//! sequences. The final result looks like this:
//! - Single sequence: `[CLS] Hello there [SEP]`
//! - Pair sequences: `[CLS] My name is Anthony [SEP] What is my name? [SEP]`
//!
//! With the type ids as following:
//! ```markdown
//! [CLS] ... [SEP] ... [SEP]
//! 0 0 0 1 1
//! ```
//!
//! So, we can define a [`TemplateProcessing`] that will achieve this result:
//! ```
//! # use tokenizers::processors::template::TemplateProcessing;
//! let template = TemplateProcessing::builder()
//! // The template when we only have a single sequence:
//! .try_single(vec!["[CLS]", "$0", "[SEP]"]).unwrap()
//! // Same as:
//! .try_single("[CLS] $0 [SEP]").unwrap()
//!
//! // The template when we have both sequences:
//! .try_pair(vec!["[CLS]:0", "$A:0", "[SEP]:0", "$B:1", "[SEP]:1"]).unwrap()
//! // Same as:
//! .try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1").unwrap()
//! // Or:
//! .try_pair("[CLS] $0 [SEP] $B:1 [SEP]:1").unwrap()
//!
//! // The list of special tokens used by each sequences
//! .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)])
//! .build()
//! .unwrap();
//! ```
//!
//! In this example, each input sequence is identified using a `$` construct. This identifier
//! lets us specify each input sequence, and the type_id to use. When nothing is specified,
//! it uses the default values. Here are the different ways to specify it:
//! - Specifying the sequence, with default `type_id == 0`: `$A` or `$B`
//! - Specifying the `type_id` with default `sequence == A`: `$0`, `$1`, `$2`, ...
//! - Specifying both: `$A:0`, `$B:1`, ...
//!
//! The same construct is used for special tokens: `<identifier>(:<type_id>)?`.
//!
//! **Warning**: You must ensure that you are giving the correct tokens/ids as these will
//! be added to the `Encoding` without any further check. If the given ids correspond to
//! something totally different in a `Tokenizer` using this `PostProcessor`, it might lead
//! to unexpected results.
//!
//! [`TemplateProcessing`]: struct.TemplateProcessing.html
//!
use crate::{Encoding, PostProcessor, Result};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::convert::{TryFrom, TryInto};
use std::result::Result as StdResult;
/// Represents any sequences received as input of the PostProcessor
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum Sequence {
/// This is the first sequence, the one that is always specified
A,
/// This is the pair sequence, that is optional
B,
}
/// Represents the different kind of pieces that constitute a template.
/// It can be either the input sequence or a [`SpecialToken`]:
///
/// - The `Sequence` has an associated `type_id` which is used by default
/// for any token inside this sequence. The `Sequence` corresponds to one
/// of the input sequence given as input of the `PostProcessor`.
///
/// - The `SpecialToken` has an associated `id`. It corresponds to a [`SpecialToken`].
///
/// The easiest way to build a `Piece` is actually by converting it from a string:
/// ```
/// # use tokenizers::processors::template::Piece;
/// # use std::convert::TryFrom;
/// let sequence_with_type_id_0 = Piece::try_from("$0").unwrap();
/// let sequence_with_type_id_1 = Piece::try_from("$1").unwrap();
/// let special_token_cls = Piece::try_from("[CLS]").unwrap();
/// ```
///
/// [`SpecialToken`]: struct.SpecialToken.html
///
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub enum Piece {
Sequence { id: Sequence, type_id: u32 },
SpecialToken { id: String, type_id: u32 },
}
impl Piece {
fn extract_id(s: &str) -> Option<Self> {
if s.starts_with('$') {
let rest = &s['$'.len_utf8()..];
// If the id is just `$`, we use 0 as type_id, and Sequence A
match rest {
"" => Some(Self::Sequence {
id: Sequence::A,
type_id: 0,
}),
"A" | "a" => Some(Self::Sequence {
id: Sequence::A,
type_id: 0,
}),
"B" | "b" => Some(Self::Sequence {
id: Sequence::B,
type_id: 0,
}),
n => {
if let Ok(type_id) = n.parse::<u32>() {
Some(Self::Sequence {
id: Sequence::A,
type_id,
})
} else {
None
}
}
}
} else {
Some(Self::SpecialToken {
id: s.to_owned(),
type_id: 0,
})
}
}
fn with_type_id(self, type_id: u32) -> Self {
match self {
Self::Sequence { id, .. } => Self::Sequence { id, type_id },
Self::SpecialToken { id, .. } => Self::SpecialToken { id, type_id },
}
}
}
impl TryFrom<String> for Piece {
type Error = String;
fn try_from(s: String) -> StdResult<Self, Self::Error> {
let parts = s.split(':').collect::<Vec<_>>();
let err = || format!("Cannot build Piece from string \"{}\"", s);
match parts.as_slice() {
[id, type_id] => {
let type_id: u32 = type_id.parse().map_err(|_| err())?;
let piece = Self::extract_id(id).ok_or_else(err)?;
Ok(piece.with_type_id(type_id))
}
[id] => Self::extract_id(id).ok_or_else(err),
_ => Err(err()),
}
}
}
impl TryFrom<&str> for Piece {
type Error = String;
fn try_from(s: &str) -> StdResult<Self, Self::Error> {
Piece::try_from(s.to_owned())
}
}
/// Represents a bunch of tokens to be used in a template.
/// Usually, special tokens have only one associated id/token but in
/// some cases, it might be interesting to have multiple ids/tokens.
///
/// # Examples
/// ```
/// # use tokenizers::processors::template::SpecialToken;
/// // Simple cases, where a single id/token is necessary:
/// let cls = SpecialToken::from(("[CLS]", 1));
/// let sep = SpecialToken::from((0, "[SEP]")); // The order in the tuple is not important
///
/// // More complex case with multiple values:
/// let complex = SpecialToken::new(
/// "A complex special token:".into(),
/// vec![0, 1, 2, 3, 4],
/// vec!["A".into(), "complex".into(), "special".into(), "token".into(), ":".into()]
/// ).unwrap();
/// ```
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
pub struct SpecialToken {
/// A unique id used to identify this SpecialToken in the template
id: String,
/// The list of associated ids
ids: Vec<u32>,
/// The list of associated tokens
tokens: Vec<String>,
}
impl From<(String, u32)> for SpecialToken {
fn from(v: (String, u32)) -> Self {
Self {
id: v.0.clone(),
ids: vec![v.1],
tokens: vec![v.0],
}
}
}
impl From<(&str, u32)> for SpecialToken {
fn from(v: (&str, u32)) -> Self {
Self::from((v.0.to_owned(), v.1))
}
}
impl From<(u32, String)> for SpecialToken {
fn from(v: (u32, String)) -> Self {
Self::from((v.1, v.0))
}
}
impl From<(u32, &str)> for SpecialToken {
fn from(v: (u32, &str)) -> Self {
Self::from((v.1.to_owned(), v.0))
}
}
impl SpecialToken {
pub fn new(id: String, ids: Vec<u32>, tokens: Vec<String>) -> Result<Self> {
if ids.len() != tokens.len() {
Err("SpecialToken: ids and tokens must be of the same length".into())
} else {
Ok(Self { id, ids, tokens })
}
}
}
/// A Template represents a Vec<[`Piece`]>.
///
/// We can easily build one as follows
/// ```
/// # use tokenizers::processors::template::Template;
/// # use std::convert::TryFrom;
/// // By providing a `String` or `&str`, we just split on whitespaces:
/// let template = Template::try_from("[CLS] $0 [SEP]").unwrap();
///
/// // By providing pieces directly:
/// let template = Template::try_from(vec!["[CLS]", "$0", "[SEP]"]).unwrap();
/// ```
/// Both of these methods give the same result.
///
/// [`Piece`]: enum.Piece.html
///
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)]
#[serde(transparent)]
pub struct Template(Vec<Piece>);
impl<T> TryFrom<Vec<T>> for Template
where
T: TryInto<Piece, Error = String>,
{
type Error = String;
fn try_from(v: Vec<T>) -> StdResult<Self, Self::Error> {
Ok(Self(
v.into_iter()
.map(|p| p.try_into())
.collect::<StdResult<Vec<_>, Self::Error>>()?,
))
}
}
impl TryFrom<String> for Template {
type Error = String;
fn try_from(s: String) -> StdResult<Self, Self::Error> {
Self::try_from(s.as_ref())
}
}
impl TryFrom<&str> for Template {
type Error = String;
fn try_from(s: &str) -> StdResult<Self, Self::Error> {
Self::try_from(s.split(' ').collect::<Vec<_>>())
}
}
/// A bunch of [`SpecialToken`] represented by their ID.
/// Internally, `Tokens` is a `HashMap<String, SpecialToken>` and can be built
/// from a HashMap or a Vec<[`SpecialToken`]>.
///
/// [`SpecialToken`]: struct.SpecialToken.html
#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Eq)]
#[serde(transparent)]
pub struct Tokens(
#[serde(serialize_with = "crate::utils::ordered_map")] pub HashMap<String, SpecialToken>,
);
impl<T: Into<SpecialToken>> From<Vec<T>> for Tokens {
fn from(v: Vec<T>) -> Self {
Self(
v.into_iter()
.map(|t| {
let token: SpecialToken = t.into();
(token.id.clone(), token)
})
.collect(),
)
}
}
impl From<HashMap<String, SpecialToken>> for Tokens {
fn from(v: HashMap<String, SpecialToken>) -> Self {
Self(v)
}
}
/// This PostProcessor takes care of processing each input `Encoding` by applying
/// the corresponding template, before merging them in the final Encoding.
///
/// A `Template` is actually a sequence of `Piece` that will be
/// concatenated together in the given order. Each `Piece` represents either
/// one of the input `Encoding` or a `SpecialToken`.
///
/// ## Example
/// ```
/// # use tokenizers::processors::template::TemplateProcessing;
/// let template = TemplateProcessing::builder()
/// .try_single("[CLS] $A [SEP]").unwrap()
/// .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1").unwrap()
/// .special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)])
/// .build()
/// .unwrap();
/// ```
///
#[derive(Debug, Clone, PartialEq, Builder, Serialize, Deserialize, Eq)]
#[serde(tag = "type", from = "TemplateProcessingDeserializer")]
#[builder(build_fn(validate = "Self::validate"))]
pub struct TemplateProcessing {
#[builder(try_setter, default = "\"$0\".try_into().unwrap()")]
single: Template,
#[builder(try_setter, default = "\"$A:0 $B:1\".try_into().unwrap()")]
pair: Template,
#[builder(setter(skip), default = "self.default_added(true)")]
#[serde(skip)]
added_single: usize,
#[builder(setter(skip), default = "self.default_added(false)")]
#[serde(skip)]
added_pair: usize,
#[builder(setter(into), default)]
special_tokens: Tokens,
}
impl From<&str> for TemplateProcessingBuilderError {
fn from(e: &str) -> Self {
e.to_string().into()
}
}
impl PartialEq for TemplateProcessingBuilderError {
fn eq(&self, other: &Self) -> bool {
self.to_string() == other.to_string()
}
}
/// We use this custom deserializer to provided the values for `added_single`
/// and `added_pair` during deserialization, while not having to serialize them
#[doc(hidden)]
#[derive(Deserialize)]
#[serde(tag = "type")]
struct TemplateProcessingDeserializer {
single: Template,
pair: Template,
special_tokens: Tokens,
}
impl From<TemplateProcessingDeserializer> for TemplateProcessing {
fn from(t: TemplateProcessingDeserializer) -> Self {
let added_single = count_added(&t.single, Some(&t.special_tokens));
let added_pair = count_added(&t.pair, Some(&t.special_tokens));
Self {
single: t.single,
pair: t.pair,
added_single,
added_pair,
special_tokens: t.special_tokens,
}
}
}
/// Count the number of added tokens in the given template
fn count_added(container: &Template, special_tokens: Option<&Tokens>) -> usize {
container
.0
.iter()
.map(|p| match p {
Piece::Sequence { .. } => 0,
Piece::SpecialToken { id, .. } => {
special_tokens.map_or(0, |spt| spt.0.get(id).map_or(0, |s| s.ids.len()))
}
})
.sum()
}
impl TemplateProcessingBuilder {
fn default_added(&self, is_single: bool) -> usize {
let container = if is_single {
self.single.as_ref()
} else {
self.pair.as_ref()
};
container.map_or(0, |pieces| {
count_added(pieces, self.special_tokens.as_ref())
})
}
fn validate(&self) -> std::result::Result<(), String> {
let pair_has_both = self.pair.as_ref().map_or(true, |pair| {
let mut has_a = false;
let mut has_b = false;
for piece in &pair.0 {
if let Piece::Sequence {
id: Sequence::A, ..
} = piece
{
has_a = true;
}
if let Piece::Sequence {
id: Sequence::B, ..
} = piece
{
has_b = true;
}
}
has_a && has_b
});
if !pair_has_both {
return Err("Template for `pair` must use both sequences".into());
}
let check = |sp| {
let exist = self
.special_tokens
.as_ref()
.map_or(false, |map| map.0.contains_key(sp));
match exist {
false => Some(sp),
true => None,
}
};
let empty = [];
let missing: HashSet<&str> = self
.single
.as_ref()
.map_or(empty.iter(), |s| s.0.iter())
.chain(self.pair.as_ref().map_or(empty.iter(), |s| s.0.iter()))
.filter_map(|piece| match piece {
Piece::Sequence { .. } => None,
Piece::SpecialToken { id, .. } => check(id.as_ref()),
})
.collect::<HashSet<_>>();
if missing.is_empty() {
Ok(())
} else {
Err(format!(
"Missing SpecialToken(s) with id(s) `{}`",
missing.iter().join(", ")
))
}
}
}
impl Default for TemplateProcessing {
fn default() -> Self {
Self {
single: "$0".try_into().unwrap(),
pair: "$1".try_into().unwrap(),
added_single: 0,
added_pair: 0,
special_tokens: Tokens::default(),
}
}
}
impl TemplateProcessing {
pub fn builder() -> TemplateProcessingBuilder {
TemplateProcessingBuilder::default()
}
fn apply_template(
&self,
template: &[Piece],
mut encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
let final_encodings: Vec<Encoding> = template
.iter()
.flat_map(|piece| {
match piece {
Piece::Sequence { id, type_id } => {
let i = usize::from(*id != Sequence::A);
let encoding = &mut encodings[i];
encoding.set_type_ids(vec![*type_id; encoding.len()]);
encoding.set_sequence_id(i);
Some(encoding.clone())
}
Piece::SpecialToken { id, type_id } => {
if add_special_tokens {
let tok = &self.special_tokens.0[id]; // We already checked existance above
let len = tok.ids.len();
let encoding = Encoding::new(
tok.ids.clone(),
std::iter::repeat(*type_id).take(len).collect(),
tok.tokens.clone(),
// words
std::iter::repeat(None).take(len).collect(),
// offsets
std::iter::repeat((0, 0)).take(len).collect(),
// special_tokens_mask
std::iter::repeat(1).take(len).collect(),
// attention_mask
std::iter::repeat(1).take(len).collect(),
// overflowing
vec![],
// sequence_range
HashMap::new(),
);
Some(encoding)
} else {
None
}
}
}
})
.collect();
//let mut pair = if encodings.len() > 1 {
// Some(encodings.pop().unwrap())
//} else {
// None
//};
//let mut encoding = encodings.pop().unwrap();
//let pair_overflowing = pair.as_mut().map_or(vec![], |e| e.take_overflowing());
//let mut overflowing: Vec<Encoding> = encoding
// .take_overflowing()
// .iter()
// .map(|encoding| -> Result<Vec<Encoding>> {
// // 1. The pair itself
// let mut overflowings = self.apply_template(
// template,
// if encodings.len() > 1 {
// vec![encoding.clone(), encodings[1].clone()]
// } else {
// vec![encoding.clone()]
// },
// add_special_tokens,
// )?;
// // 2. Its overflowings
// for other_o in &pair_overflowing {
// overflowings.extend(self.apply_template(
// template,
// vec![encoding.clone(), other_o.clone()],
// add_special_tokens,
// )?);
// }
// Ok(overflowings)
// })
// .collect::<Result<Vec<Vec<Encoding>>>>()?
// .into_iter()
// .flatten()
// .collect();
//// We also need to combine the first sequence with all other overflowings
//overflowing.extend(
// pair_overflowing
// .into_iter()
// .map(|pair| {
// self.apply_template(template, vec![encoding.clone(), pair], add_special_tokens)
// })
// .collect::<Result<Vec<_>>>()?
// .into_iter()
// .flatten(),
//);
Ok(final_encodings)
}
}
impl PostProcessor for TemplateProcessing {
fn added_tokens(&self, is_pair: bool) -> usize {
if is_pair {
self.added_pair
} else {
self.added_single
}
}
fn process_encodings(
&self,
encodings: Vec<Encoding>,
add_special_tokens: bool,
) -> Result<Vec<Encoding>> {
// let (encoding, pair): (Encoding, Option<Encoding>) = match encodings.len() {
// 1 => (
// encodings
// .pop()
// .ok_or(ProcessorError::InvalidEncodingsVecLength)?,
// None,
// ),
// 2 => {
// let pair = encodings
// .pop()
// .ok_or(ProcessorError::InvalidEncodingsVecLength)?;
// let encoding = encodings
// .pop()
// .ok_or(ProcessorError::InvalidEncodingsVecLength)?;
// (encoding, Some(pair))
// }
// _ => return Err(Box::new(ProcessorError::InvalidEncodingsVecLength)),
// };
let template = match encodings.len() {
2 => &self.pair.0,
1 => &self.single.0,
_ => todo!(),
};
let encodings = self.apply_template(template, encodings, add_special_tokens)?;
Ok(encodings)
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::convert::TryInto;
use std::iter::FromIterator;
#[test]
fn piece_serde() {
let seq_0 = Piece::Sequence {
id: Sequence::A,
type_id: 0,
};
let seq_0_s = r#"{"Sequence":{"id":"A","type_id":0}}"#;
assert_eq!(serde_json::to_string(&seq_0).unwrap(), seq_0_s);
assert_eq!(serde_json::from_str::<Piece>(seq_0_s).unwrap(), seq_0);
let seq_1 = Piece::Sequence {
id: Sequence::B,
type_id: 1,
};
let seq_1_s = r#"{"Sequence":{"id":"B","type_id":1}}"#;
assert_eq!(serde_json::to_string(&seq_1).unwrap(), seq_1_s);
assert_eq!(serde_json::from_str::<Piece>(seq_1_s).unwrap(), seq_1);
let spe = Piece::SpecialToken {
id: "[CLS]".into(),
type_id: 0,
};
let spe_s = r#"{"SpecialToken":{"id":"[CLS]","type_id":0}}"#;
assert_eq!(serde_json::to_string(&spe).unwrap(), spe_s);
assert_eq!(serde_json::from_str::<Piece>(spe_s).unwrap(), spe);
}
#[test]
fn piece() {
assert_eq!(
Ok(Piece::Sequence {
id: Sequence::A,
type_id: 0
}),
"$".try_into()
);
assert_eq!(
Ok(Piece::Sequence {
id: Sequence::B,
type_id: 0
}),
"$B".try_into()
);
assert_eq!(
Ok(Piece::Sequence {
id: Sequence::A,
type_id: 1
}),
"$1".try_into()
);
assert_eq!(
Ok(Piece::Sequence {
id: Sequence::B,
type_id: 2
}),
"$B:2".try_into()
);
assert_eq!(
Ok(Piece::Sequence {
id: Sequence::A,
type_id: 1
}),
"$:1".try_into()
);
assert!(Piece::try_from("$C:1").is_err());
assert!(Piece::try_from("$A:").is_err());
}
#[test]
fn special_token_serde() {
let simple = SpecialToken::from(("[CLS]", 0));
let simple_s = r#"{"id":"[CLS]","ids":[0],"tokens":["[CLS]"]}"#;
assert_eq!(serde_json::to_string(&simple).unwrap(), simple_s);
assert_eq!(
serde_json::from_str::<SpecialToken>(simple_s).unwrap(),
simple
);
let complete = SpecialToken::new(
"[2FR]".into(),
vec![1, 2, 3],
vec!["convert".into(), "to".into(), "FR".into()],
)
.unwrap();
let complete_s = r#"{"id":"[2FR]","ids":[1,2,3],"tokens":["convert","to","FR"]}"#;
assert_eq!(serde_json::to_string(&complete).unwrap(), complete_s);
assert_eq!(
serde_json::from_str::<SpecialToken>(complete_s).unwrap(),
complete
);
let malformed = SpecialToken::new(
"[2FR]".into(),
vec![1, 2],
vec!["convert".into(), "to".into(), "FR".into()],
);
assert!(malformed.is_err());
let malformed = SpecialToken::new(
"[2FR]".into(),
vec![1, 2, 3],
vec!["convert".into(), "FR".into()],
);
assert!(malformed.is_err());
}
#[test]
fn template_serde() {
let template = Template(vec![
Piece::Sequence {
id: Sequence::A,
type_id: 0,
},
Piece::SpecialToken {
id: "[CLS]".into(),
type_id: 0,
},
]);
let template_s =
r#"[{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[CLS]","type_id":0}}]"#;
assert_eq!(serde_json::to_string(&template).unwrap(), template_s);
assert_eq!(
serde_json::from_str::<Template>(template_s).unwrap(),
template
);
}
#[test]
fn tokens_serde() {
let tokens = Tokens::from(vec![("[CLS]", 1), ("[SEP]", 0)]);
let tokens_s = r#"{"[CLS]":{"id":"[CLS]","ids":[1],"tokens":["[CLS]"]},"[SEP]":{"id":"[SEP]","ids":[0],"tokens":["[SEP]"]}}"#;
let tokens_ser = serde_json::to_string(&tokens).unwrap();
assert_eq!(tokens_ser, tokens_s);
assert_eq!(serde_json::from_str::<Tokens>(tokens_s).unwrap(), tokens);
}
fn get_bert_template() -> TemplateProcessing {
TemplateProcessing::builder()
.try_single(vec!["[CLS]", "$0", "[SEP]"])
.unwrap()
.try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1")
.unwrap()
.special_tokens(vec![("[CLS]", 1), ("[SEP]", 0)])
.build()
.unwrap()
}
#[test]
fn template_processing_serde() {
let template = tests::get_bert_template();
let template_s = "{\
\"type\":\"TemplateProcessing\",\
\"single\":[\
{\"SpecialToken\":{\"id\":\"[CLS]\",\"type_id\":0}},\
{\"Sequence\":{\"id\":\"A\",\"type_id\":0}},\
{\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":0}}\
],\
\"pair\":[\
{\"SpecialToken\":{\"id\":\"[CLS]\",\"type_id\":0}},\
{\"Sequence\":{\"id\":\"A\",\"type_id\":0}},\
{\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":0}},\
{\"Sequence\":{\"id\":\"B\",\"type_id\":1}},\
{\"SpecialToken\":{\"id\":\"[SEP]\",\"type_id\":1}}\
],\
\"special_tokens\":{\
\"[CLS]\":{\
\"id\":\"[CLS]\",\"ids\":[1],\"tokens\":[\"[CLS]\"]\
},\
\"[SEP]\":{\
\"id\":\"[SEP]\",\"ids\":[0],\"tokens\":[\"[SEP]\"]\
}\
}}";
let template_ser = serde_json::to_string(&template).unwrap();
assert_eq!(template_ser, template_s);
assert_eq!(
serde_json::from_str::<TemplateProcessing>(template_s).unwrap(),
template
);
}
#[test]
fn missing_special_tokens() {
let processor = TemplateProcessing::builder()
.try_single("[CLS] $0 [SEP]")
.unwrap()
.try_pair("[CLS] $A:0 [SEP] $B:1 [SEP]")
.unwrap()
.build();
let err_a = Err("Missing SpecialToken(s) with id(s) `[SEP], [CLS]`".into());
let err_b = Err("Missing SpecialToken(s) with id(s) `[CLS], [SEP]`".into());
assert!(processor == err_a || processor == err_b);
}
#[test]
fn template_processing() {
let processor = tests::get_bert_template();
assert_eq!(processor.added_tokens(false), 2);
assert_eq!(processor.added_tokens(true), 3);
use crate::Token;
let encoding = Encoding::from_tokens(
vec![
Token::new(12, "Hello".into(), (0, 5)),
Token::new(14, "there".into(), (6, 11)),
],
0,
);
let pair = Encoding::from_tokens(vec![Token::new(15, "pair".into(), (0, 4))], 0);
let single_encoding = processor.process(encoding.clone(), None, true).unwrap();
assert_eq!(
single_encoding,
Encoding::new(
vec![1, 12, 14, 0],
vec![0, 0, 0, 0],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into()
],
vec![None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0)],
vec![1, 0, 0, 1],
vec![1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3)]),
)
);
assert_eq!(single_encoding.token_to_sequence(2), Some(0));
assert_eq!(single_encoding.token_to_sequence(3), None);
let pair_encoding = processor.process(encoding, Some(pair), true).unwrap();
assert_eq!(
pair_encoding,
Encoding::new(
vec![1, 12, 14, 0, 15, 0],
vec![0, 0, 0, 0, 1, 1],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into(),
"pair".into(),
"[SEP]".into()
],
vec![None, None, None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (0, 0)],
vec![1, 0, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(2), Some(0));
assert_eq!(pair_encoding.token_to_sequence(3), None);
assert_eq!(pair_encoding.token_to_sequence(4), Some(1));
assert_eq!(pair_encoding.token_to_sequence(5), None);
}
#[test]
fn template_processing_overflowing() {
let processor = tests::get_bert_template();
assert_eq!(processor.added_tokens(false), 2);
assert_eq!(processor.added_tokens(true), 3);
use crate::Token;
let mut encoding = Encoding::from_tokens(
vec![
Token::new(12, "Hello".into(), (0, 5)),
Token::new(14, "there".into(), (6, 11)),
],
0,
);
let overflowing = Encoding::from_tokens(vec![Token::new(13, "you".into(), (12, 15))], 0);
encoding.set_overflowing(vec![overflowing]);
let mut pair = Encoding::from_tokens(
vec![
Token::new(15, "pair".into(), (0, 4)),
Token::new(16, "with".into(), (5, 9)),
],
0,
);
let pair_overflowing =
Encoding::from_tokens(vec![Token::new(17, "info".into(), (10, 14))], 0);
pair.set_overflowing(vec![pair_overflowing]);
let single_encoding = processor.process(encoding.clone(), None, true).unwrap();
assert_eq!(
single_encoding,
Encoding::new(
vec![1, 12, 14, 0],
vec![0, 0, 0, 0],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into()
],
vec![None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0)],
vec![1, 0, 0, 1],
vec![1, 1, 1, 1],
vec![Encoding::new(
vec![1, 13, 0],
vec![0, 0, 0],
vec!["[CLS]".into(), "you".into(), "[SEP]".into()],
vec![None, None, None],
vec![(0, 0), (12, 15), (0, 0)],
vec![1, 0, 1],
vec![1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..2)]),
)],
HashMap::from_iter(vec![(0, 1..3)]),
)
);
assert_eq!(single_encoding.token_to_sequence(2), Some(0));
assert_eq!(single_encoding.token_to_sequence(3), None);
let pair_encoding = processor.process(encoding, Some(pair), true).unwrap();
println!("{pair_encoding:#?}");
assert_eq!(
pair_encoding,
Encoding::new(
vec![1, 12, 14, 0, 15, 16, 0],
vec![0, 0, 0, 0, 1, 1, 1],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into(),
"pair".into(),
"with".into(),
"[SEP]".into()
],
vec![None, None, None, None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0), (0, 4), (5, 9), (0, 0)],
vec![1, 0, 0, 1, 0, 0, 1],
vec![1, 1, 1, 1, 1, 1, 1],
vec![
Encoding::new(
vec![1, 13, 0, 15, 16, 0],
vec![0, 0, 0, 1, 1, 1],
vec![
"[CLS]".into(),
"you".into(),
"[SEP]".into(),
"pair".into(),
"with".into(),
"[SEP]".into()
],
vec![None, None, None, None, None, None],
vec![(0, 0), (12, 15), (0, 0), (0, 4), (5, 9), (0, 0)],
vec![1, 0, 1, 0, 0, 1],
vec![1, 1, 1, 1, 1, 1],
vec![Encoding::new(
vec![1, 13, 0, 17, 0],
vec![0, 0, 0, 0, 1],
vec![
"[CLS]".into(),
"you".into(),
"[SEP]".into(),
"info".into(),
"[SEP]".into()
],
vec![None, None, None, None, None,],
vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)],
vec![1, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]),
),],
HashMap::from_iter(vec![(1, 3..5), (0, 1..2)]),
),
Encoding::new(
vec![1, 13, 0, 17, 0],
vec![0, 0, 0, 0, 1],
vec![
"[CLS]".into(),
"you".into(),
"[SEP]".into(),
"info".into(),
"[SEP]".into()
],
vec![None, None, None, None, None,],
vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)],
vec![1, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]),
),
Encoding::new(
vec![1, 12, 14, 0, 17, 0],
vec![0, 0, 0, 0, 0, 1],
vec![
"[CLS]".into(),
"Hello".into(),
"there".into(),
"[SEP]".into(),
"info".into(),
"[SEP]".into()
],
vec![None, None, None, None, None, None],
vec![(0, 0), (0, 5), (6, 11), (0, 0), (10, 14), (0, 0)],
vec![1, 0, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1, 1],
vec![Encoding::new(
vec![1, 13, 0, 17, 0],
vec![0, 0, 0, 0, 1],
vec![
"[CLS]".into(),
"you".into(),
"[SEP]".into(),
"info".into(),
"[SEP]".into()
],
vec![None, None, None, None, None,],
vec![(0, 0), (12, 15), (0, 0), (10, 14), (0, 0)],
vec![1, 0, 1, 0, 1],
vec![1, 1, 1, 1, 1],
vec![],
HashMap::from_iter(vec![(0, 1..2), (1, 3..4)]),
),],
HashMap::from_iter(vec![(0, 1..3), (1, 4..5)]),
)
],
HashMap::from_iter(vec![(0, 1..3), (1, 4..6)]),
)
);
assert_eq!(pair_encoding.token_to_sequence(2), Some(0));
assert_eq!(pair_encoding.token_to_sequence(3), None);
assert_eq!(pair_encoding.token_to_sequence(4), Some(1));
assert_eq!(pair_encoding.token_to_sequence(5), Some(1));
assert_eq!(pair_encoding.token_to_sequence(6), None);
}
#[test]
fn pair_must_use_both_sequences() {
let processor = TemplateProcessing::builder()
.try_single("$0")
.unwrap()
.try_pair("$0 $1")
.unwrap()
.build();
assert_eq!(
processor,
Err("Template for `pair` must use both sequences".into())
);
}
#[test]
fn expect_wrong_error_message() {
let processor = TemplateProcessing::builder()
.try_single("$0")
.unwrap()
.try_pair("$0 $1")
.unwrap()
.build();
assert_ne!(
processor,
Err("Expect the left side error message to be different from the right side!".into())
);
}
}
| tokenizers/tokenizers/src/processors/template.rs/0 | {
"file_path": "tokenizers/tokenizers/src/processors/template.rs",
"repo_id": "tokenizers",
"token_count": 21199
} | 246 |
#[cfg(feature = "progressbar")]
pub(crate) use indicatif::{ProgressBar, ProgressStyle};
#[cfg(not(feature = "progressbar"))]
mod progressbar {
use std::borrow::Cow;
pub struct ProgressBar;
impl ProgressBar {
pub fn new(_length: u64) -> Self {
Self {}
}
pub fn set_length(&self, _length: u64) {}
pub fn set_message(&self, _message: impl Into<Cow<'static, str>>) {}
pub fn finish(&self) {}
pub fn reset(&self) {}
pub fn inc(&self, _inc: u64) {}
pub fn set_style(&self, _style: ProgressStyle) {}
}
pub struct ProgressStyle {}
impl ProgressStyle {
pub fn default_bar() -> Self {
Self {}
}
pub fn template(self, _template: &str) -> Result<Self, String> {
Ok(self)
}
}
}
#[cfg(not(feature = "progressbar"))]
pub(crate) use progressbar::{ProgressBar, ProgressStyle};
| tokenizers/tokenizers/src/utils/progress.rs/0 | {
"file_path": "tokenizers/tokenizers/src/utils/progress.rs",
"repo_id": "tokenizers",
"token_count": 403
} | 247 |
FROM python:3.10-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root
RUN apt-get update && apt-get install -y time git
ENV UV_PYTHON=/usr/local/bin/python
RUN pip install uv && uv venv
RUN uv pip install --no-cache-dir -U pip setuptools GitPython "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ruff]" urllib3
RUN apt-get install -y jq curl && apt-get clean && rm -rf /var/lib/apt/lists/* | transformers/docker/quality.dockerfile/0 | {
"file_path": "transformers/docker/quality.dockerfile",
"repo_id": "transformers",
"token_count": 168
} | 248 |
apiVersion: v1
kind: PersistentVolume
metadata:
name: huggingface-cluster-disk
spec:
storageClassName: ""
capacity:
storage: 500Gi
accessModes:
- ReadOnlyMany
claimRef:
namespace: default
name: huggingface-cluster-disk-claim
gcePersistentDisk:
pdName: huggingface-cluster-disk
fsType: ext4
readOnly: true
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: huggingface-cluster-disk-claim
spec:
# Specify "" as the storageClassName so it matches the PersistentVolume's StorageClass.
# A nil storageClassName value uses the default StorageClass. For details, see
# https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
storageClassName: ""
accessModes:
- ReadOnlyMany
resources:
requests:
storage: 1Ki
| transformers/docker/transformers-pytorch-tpu/dataset.yaml/0 | {
"file_path": "transformers/docker/transformers-pytorch-tpu/dataset.yaml",
"repo_id": "transformers",
"token_count": 274
} | 249 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Generation with LLMs
[[open-in-colab]]
LLMs (Large Language Models) sind die SchlÃŒsselkomponente bei der Texterstellung. Kurz gesagt, bestehen sie aus groÃen, vortrainierten Transformationsmodellen, die darauf trainiert sind, das nÀchste Wort (oder genauer gesagt Token) aus einem Eingabetext vorherzusagen. Da sie jeweils ein Token vorhersagen, mÃŒssen Sie etwas AufwÀndigeres tun, um neue SÀtze zu generieren, als nur das Modell aufzurufen - Sie mÃŒssen eine autoregressive Generierung durchfÃŒhren.
Die autoregressive Generierung ist ein Verfahren zur Inferenzzeit, bei dem ein Modell mit seinen eigenen generierten Ausgaben iterativ aufgerufen wird, wenn einige anfÀngliche Eingaben vorliegen. In ð€ Transformers wird dies von der Methode [`~generation.GenerationMixin.generate`] ÃŒbernommen, die allen Modellen mit generativen FÀhigkeiten zur VerfÃŒgung steht.
Dieses Tutorial zeigt Ihnen, wie Sie:
* Text mit einem LLM generieren
* Vermeiden Sie hÀufige Fallstricke
* NÀchste Schritte, damit Sie das Beste aus Ihrem LLM herausholen können
Bevor Sie beginnen, stellen Sie sicher, dass Sie alle erforderlichen Bibliotheken installiert haben:
```bash
pip install transformers bitsandbytes>=0.39.0 -q
```
## Text generieren
Ein Sprachmodell, das fÌr [causal language modeling](tasks/language_modeling) trainiert wurde, nimmt eine Folge von Text-Token als Eingabe und gibt die Wahrscheinlichkeitsverteilung fÌr das nÀchste Token zurÌck.
<!-- [GIF 1 -- FWD PASS] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_1_1080p.mov"
></video>
<figcaption>"Forward pass of an LLM"</figcaption>
</figure>
Ein wichtiger Aspekt der autoregressiven Generierung mit LLMs ist die Auswahl des nÀchsten Tokens aus dieser Wahrscheinlichkeitsverteilung. In diesem Schritt ist alles möglich, solange Sie am Ende ein Token fÃŒr die nÀchste Iteration haben. Das heiÃt, es kann so einfach sein wie die Auswahl des wahrscheinlichsten Tokens aus der Wahrscheinlichkeitsverteilung oder so komplex wie die Anwendung von einem Dutzend Transformationen vor der Stichprobenziehung aus der resultierenden Verteilung.
<!-- [GIF 2 -- TEXT GENERATION] -->
<figure class="image table text-center m-0 w-full">
<video
style="max-width: 90%; margin: auto;"
autoplay loop muted playsinline
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/assisted-generation/gif_2_1080p.mov"
></video>
<figcaption>"Die autoregressive Generierung wÀhlt iterativ das nÀchste Token aus einer Wahrscheinlichkeitsverteilung aus, um Text zu erzeugen"</figcaption>
</figure>
Der oben dargestellte Prozess wird iterativ wiederholt, bis eine bestimmte Abbruchbedingung erreicht ist. Im Idealfall wird die Abbruchbedingung vom Modell vorgegeben, das lernen sollte, wann es ein Ende-der-Sequenz-Token (EOS) ausgeben muss. Ist dies nicht der Fall, stoppt die Generierung, wenn eine vordefinierte MaximallÀnge erreicht ist.
Damit sich Ihr Modell so verhÀlt, wie Sie es fÌr Ihre Aufgabe erwarten, mÌssen Sie den Schritt der Token-Auswahl und die Abbruchbedingung richtig einstellen. Aus diesem Grund haben wir zu jedem Modell eine [`~generation.GenerationConfig`]-Datei, die eine gute generative Standardparametrisierung enthÀlt und zusammen mit Ihrem Modell geladen wird.
Lassen Sie uns ÃŒber Code sprechen!
<Tip>
Wenn Sie an der grundlegenden Verwendung von LLMs interessiert sind, ist unsere High-Level-Schnittstelle [`Pipeline`](pipeline_tutorial) ein guter Ausgangspunkt. LLMs erfordern jedoch oft fortgeschrittene Funktionen wie Quantisierung und Feinsteuerung des Token-Auswahlschritts, was am besten ÃŒber [`~generation.GenerationMixin.generate`] erfolgt. Die autoregressive Generierung mit LLMs ist ebenfalls ressourcenintensiv und sollte fÃŒr einen angemessenen Durchsatz auf einer GPU ausgefÃŒhrt werden.
</Tip>
<!-- TODO: update example to llama 2 (or a newer popular baseline) when it becomes ungated -->
ZunÀchst mÌssen Sie das Modell laden.
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
Sie werden zwei Flags in dem Aufruf `from_pretrained` bemerken:
- `device_map` stellt sicher, dass das Modell auf Ihre GPU(s) ÃŒbertragen wird
- `load_in_4bit` wendet [dynamische 4-Bit-Quantisierung](main_classes/quantization) an, um die Ressourcenanforderungen massiv zu reduzieren
Es gibt noch andere Möglichkeiten, ein Modell zu initialisieren, aber dies ist eine gute Grundlage, um mit einem LLM zu beginnen.
Als nÀchstes mÌssen Sie Ihre Texteingabe mit einem [tokenizer](tokenizer_summary) vorverarbeiten.
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")
```
Die Variable `model_inputs` enthÀlt die tokenisierte Texteingabe sowie die Aufmerksamkeitsmaske. Obwohl [`~generation.GenerationMixin.generate`] sein Bestes tut, um die Aufmerksamkeitsmaske abzuleiten, wenn sie nicht Ìbergeben wird, empfehlen wir, sie fÌr optimale Ergebnisse wann immer möglich zu Ìbergeben.
Rufen Sie schlieÃlich die Methode [`~generation.GenerationMixin.generate`] auf, um die generierten Token zurÃŒckzugeben, die vor dem Drucken in Text umgewandelt werden sollten.
```py
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A list of colors: red, blue, green, yellow, black, white, and brown'
```
Und das war's! Mit ein paar Zeilen Code können Sie sich die Macht eines LLM zunutze machen.
## HÀufige Fallstricke
Es gibt viele [Generierungsstrategien](generation_strategies), und manchmal sind die Standardwerte fÌr Ihren Anwendungsfall vielleicht nicht geeignet. Wenn Ihre Ausgaben nicht mit dem Ìbereinstimmen, was Sie erwarten, haben wir eine Liste der hÀufigsten Fallstricke erstellt und wie Sie diese vermeiden können.
```py
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
... "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
... )
```
### Generierte Ausgabe ist zu kurz/lang
Wenn in der Datei [`~generation.GenerationConfig`] nichts angegeben ist, gibt `generate` standardmÀÃig bis zu 20 Token zurÃŒck. Wir empfehlen dringend, `max_new_tokens` in Ihrem `generate`-Aufruf manuell zu setzen, um die maximale Anzahl neuer Token zu kontrollieren, die zurÃŒckgegeben werden können. Beachten Sie, dass LLMs (genauer gesagt, [decoder-only models](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)) auch die Eingabeaufforderung als Teil der Ausgabe zurÃŒckgeben.
```py
>>> model_inputs = tokenizer(["A sequence of numbers: 1, 2"], return_tensors="pt").to("cuda")
>>> # By default, the output will contain up to 20 tokens
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5'
>>> # Setting `max_new_tokens` allows you to control the maximum length
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=50)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'A sequence of numbers: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,'
```
### Falscher Generierungsmodus
StandardmÀÃig und sofern nicht in der Datei [`~generation.GenerationConfig`] angegeben, wÀhlt `generate` bei jeder Iteration das wahrscheinlichste Token aus (gierige Dekodierung). Je nach Aufgabe kann dies unerwÃŒnscht sein; kreative Aufgaben wie Chatbots oder das Schreiben eines Aufsatzes profitieren vom Sampling. Andererseits profitieren Aufgaben, bei denen es auf die Eingabe ankommt, wie z.B. Audiotranskription oder Ãbersetzung, von der gierigen Dekodierung. Aktivieren Sie das Sampling mit `do_sample=True`. Mehr zu diesem Thema erfahren Sie in diesem [Blogbeitrag](https://huggingface.co/blog/how-to-generate).
```py
>>> # Set seed or reproducibility -- you don't need this unless you want full reproducibility
>>> from transformers import set_seed
>>> set_seed(0)
>>> model_inputs = tokenizer(["I am a cat."], return_tensors="pt").to("cuda")
>>> # LLM + greedy decoding = repetitive, boring output
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat. I am a cat. I am a cat. I am a cat'
>>> # With sampling, the output becomes more creative!
>>> generated_ids = model.generate(**model_inputs, do_sample=True)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'I am a cat.\nI just need to be. I am always.\nEvery time'
```
### Falsche AuffÃŒllseite
LLMs sind [decoder-only](https://huggingface.co/learn/nlp-course/chapter1/6?fw=pt)-Architekturen, d.h. sie iterieren weiter Ìber Ihre Eingabeaufforderung. Wenn Ihre Eingaben nicht die gleiche LÀnge haben, mÌssen sie aufgefÌllt werden. Da LLMs nicht darauf trainiert sind, mit aufgefÌllten Token fortzufahren, muss Ihre Eingabe links aufgefÌllt werden. Vergessen Sie auch nicht, die Aufmerksamkeitsmaske an generate zu Ìbergeben!
```py
>>> # The tokenizer initialized above has right-padding active by default: the 1st sequence,
>>> # which is shorter, has padding on the right side. Generation fails.
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids[0], skip_special_tokens=True)[0]
''
>>> # With left-padding, it works as expected!
>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b", padding_side="left")
>>> tokenizer.pad_token = tokenizer.eos_token # Llama has no pad token by default
>>> model_inputs = tokenizer(
... ["1, 2, 3", "A, B, C, D, E"], padding=True, return_tensors="pt"
... ).to("cuda")
>>> generated_ids = model.generate(**model_inputs)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
'1, 2, 3, 4, 5, 6,'
```
<!-- TODO: when the prompting guide is ready, mention the importance of setting the right prompt in this section -->
## Weitere Ressourcen
WÀhrend der Prozess der autoregressiven Generierung relativ einfach ist, kann die optimale Nutzung Ihres LLM ein schwieriges Unterfangen sein, da es viele bewegliche Teile gibt. FÌr Ihre nÀchsten Schritte, die Ihnen helfen, tiefer in die LLM-Nutzung und das VerstÀndnis einzutauchen:
<!-- TODO: mit neuen Anleitungen vervollstÀndigen -->
### Fortgeschrittene Nutzung generieren
1. [Leitfaden](generation_strategies) zur Steuerung verschiedener Generierungsmethoden, zur Einrichtung der Generierungskonfigurationsdatei und zum Streaming der Ausgabe;
2. API-Referenz zu [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`] und [generate-bezogene Klassen](internal/generation_utils).
### LLM-Ranglisten
1. [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), das sich auf die QualitÀt der Open-Source-Modelle konzentriert;
2. [Open LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard), das sich auf den LLM-Durchsatz konzentriert.
### Latenz und Durchsatz
1. [Leitfaden](main_classes/quantization) zur dynamischen Quantisierung, der Ihnen zeigt, wie Sie Ihren Speicherbedarf drastisch reduzieren können.
### Verwandte Bibliotheken
1. [text-generation-inference](https://github.com/huggingface/text-generation-inference), ein produktionsreifer Server fÃŒr LLMs;
2. [`optimum`](https://github.com/huggingface/optimum), eine Erweiterung von ð€ Transformers, die fÃŒr bestimmte Hardware-GerÀte optimiert.
| transformers/docs/source/de/llm_tutorial.md/0 | {
"file_path": "transformers/docs/source/de/llm_tutorial.md",
"repo_id": "transformers",
"token_count": 4767
} | 250 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# How to create a custom pipeline?
In this guide, we will see how to create a custom pipeline and share it on the [Hub](https://hf.co/models) or add it to the
ð€ Transformers library.
First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
as it makes compatibility easier (even through other languages via JSON). Those will be the `inputs` of the
pipeline (`preprocess`).
Then define the `outputs`. Same policy as the `inputs`. The simpler, the better. Those will be the outputs of
`postprocess` method.
Start by inheriting the base class `Pipeline` with the 4 methods needed to implement `preprocess`,
`_forward`, `postprocess`, and `_sanitize_parameters`.
```python
from transformers import Pipeline
class MyPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, inputs, maybe_arg=2):
model_input = Tensor(inputs["input_ids"])
return {"model_input": model_input}
def _forward(self, model_inputs):
# model_inputs == {"model_input": model_input}
outputs = self.model(**model_inputs)
# Maybe {"logits": Tensor(...)}
return outputs
def postprocess(self, model_outputs):
best_class = model_outputs["logits"].softmax(-1)
return best_class
```
The structure of this breakdown is to support relatively seamless support for CPU/GPU, while supporting doing
pre/postprocessing on the CPU on different threads
`preprocess` will take the originally defined inputs, and turn them into something feedable to the model. It might
contain more information and is usually a `Dict`.
`_forward` is the implementation detail and is not meant to be called directly. `forward` is the preferred
called method as it contains safeguards to make sure everything is working on the expected device. If anything is
linked to a real model it belongs in the `_forward` method, anything else is in the preprocess/postprocess.
`postprocess` methods will take the output of `_forward` and turn it into the final output that was decided
earlier.
`_sanitize_parameters` exists to allow users to pass any parameters whenever they wish, be it at initialization
time `pipeline(...., maybe_arg=4)` or at call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
The returns of `_sanitize_parameters` are the 3 dicts of kwargs that will be passed directly to `preprocess`,
`_forward`, and `postprocess`. Don't fill anything if the caller didn't call with any extra parameter. That
allows to keep the default arguments in the function definition which is always more "natural".
A classic example would be a `top_k` argument in the post processing in classification tasks.
```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```
In order to achieve that, we'll update our `postprocess` method with a default parameter to `5`. and edit
`_sanitize_parameters` to allow this new parameter.
```python
def postprocess(self, model_outputs, top_k=5):
best_class = model_outputs["logits"].softmax(-1)
# Add logic to handle top_k
return best_class
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
postprocess_kwargs = {}
if "top_k" in kwargs:
postprocess_kwargs["top_k"] = kwargs["top_k"]
return preprocess_kwargs, {}, postprocess_kwargs
```
Try to keep the inputs/outputs very simple and ideally JSON-serializable as it makes the pipeline usage very easy
without requiring users to understand new kinds of objects. It's also relatively common to support many different types
of arguments for ease of use (audio files, which can be filenames, URLs or pure bytes)
## Adding it to the list of supported tasks
To register your `new-task` to the list of supported tasks, you have to add it to the `PIPELINE_REGISTRY`:
```python
from transformers.pipelines import PIPELINE_REGISTRY
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
)
```
You can specify a default model if you want, in which case it should come with a specific revision (which can be the name of a branch or a commit hash, here we took `"abcdef"`) as well as the type:
```python
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
default={"pt": ("user/awesome_model", "abcdef")},
type="text", # current support type: text, audio, image, multimodal
)
```
## Share your pipeline on the Hub
To share your custom pipeline on the Hub, you just have to save the custom code of your `Pipeline` subclass in a
python file. For instance, let's say we want to use a custom pipeline for sentence pair classification like this:
```py
import numpy as np
from transformers import Pipeline
def softmax(outputs):
maxes = np.max(outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
```
The implementation is framework agnostic, and will work for PyTorch and TensorFlow models. If we have saved this in
a file named `pair_classification.py`, we can then import it and register it like this:
```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
tf_model=TFAutoModelForSequenceClassification,
)
```
Once this is done, we can use it with a pretrained model. For instance `sgugger/finetuned-bert-mrpc` has been
fine-tuned on the MRPC dataset, which classifies pairs of sentences as paraphrases or not.
```py
from transformers import pipeline
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
```
Then we can share it on the Hub by using the `push_to_hub` method:
```py
classifier.push_to_hub("test-dynamic-pipeline")
```
This will copy the file where you defined `PairClassificationPipeline` inside the folder `"test-dynamic-pipeline"`,
along with saving the model and tokenizer of the pipeline, before pushing everything into the repository
`{your_username}/test-dynamic-pipeline`. After that, anyone can use it as long as they provide the option
`trust_remote_code=True`:
```py
from transformers import pipeline
classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
```
## Add the pipeline to ð€ Transformers
If you want to contribute your pipeline to ð€ Transformers, you will need to add a new module in the `pipelines` submodule
with the code of your pipeline, then add it to the list of tasks defined in `pipelines/__init__.py`.
Then you will need to add tests. Create a new file `tests/test_pipelines_MY_PIPELINE.py` with examples of the other tests.
The `run_pipeline_test` function will be very generic and run on small random models on every possible
architecture as defined by `model_mapping` and `tf_model_mapping`.
This is very important to test future compatibility, meaning if someone adds a new model for
`XXXForQuestionAnswering` then the pipeline test will attempt to run on it. Because the models are random it's
impossible to check for actual values, that's why there is a helper `ANY` that will simply attempt to match the
output of the pipeline TYPE.
You also *need* to implement 2 (ideally 4) tests.
- `test_small_model_pt` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
and test the pipeline outputs. The results should be the same as `test_small_model_tf`.
- `test_small_model_tf` : Define 1 small model for this pipeline (doesn't matter if the results don't make sense)
and test the pipeline outputs. The results should be the same as `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
sure there is no drift in future releases.
- `test_large_model_tf` (`optional`): Tests the pipeline on a real pipeline where the results are supposed to
make sense. These tests are slow and should be marked as such. Here the goal is to showcase the pipeline and to make
sure there is no drift in future releases.
| transformers/docs/source/en/add_new_pipeline.md/0 | {
"file_path": "transformers/docs/source/en/add_new_pipeline.md",
"repo_id": "transformers",
"token_count": 3336
} | 251 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Fully Sharded Data Parallel
[Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) is a data parallel method that shards a model's parameters, gradients and optimizer states across the number of available GPUs (also called workers or *rank*). Unlike [DistributedDataParallel (DDP)](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html), FSDP reduces memory-usage because a model is replicated on each GPU. This improves GPU memory-efficiency and allows you to train much larger models on fewer GPUs. FSDP is integrated with the Accelerate, a library for easily managing training in distributed environments, which means it is available for use from the [`Trainer`] class.
Before you start, make sure Accelerate is installed and at least PyTorch 2.1.0 or newer.
```bash
pip install accelerate
```
## FSDP configuration
To start, run the [`accelerate config`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-config) command to create a configuration file for your training environment. Accelerate uses this configuration file to automatically setup the correct training environment based on your selected training options in `accelerate config`.
```bash
accelerate config
```
When you run `accelerate config`, you'll be prompted with a series of options to configure your training environment. This section covers some of the most important FSDP options. To learn more about the other available FSDP options, take a look at the [fsdp_config](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments.fsdp_config) parameters.
### Sharding strategy
FSDP offers a number of sharding strategies to select from:
* `FULL_SHARD` - shards model parameters, gradients and optimizer states across workers; select `1` for this option
* `SHARD_GRAD_OP`- shard gradients and optimizer states across workers; select `2` for this option
* `NO_SHARD` - don't shard anything (this is equivalent to DDP); select `3` for this option
* `HYBRID_SHARD` - shard model parameters, gradients and optimizer states within each worker where each worker also has a full copy; select `4` for this option
* `HYBRID_SHARD_ZERO2` - shard gradients and optimizer states within each worker where each worker also has a full copy; select `5` for this option
This is enabled by the `fsdp_sharding_strategy` flag.
### CPU offload
You could also offload parameters and gradients when they are not in use to the CPU to save even more GPU memory and help you fit large models where even FSDP may not be sufficient. This is enabled by setting `fsdp_offload_params: true` when running `accelerate config`.
### Wrapping policy
FSDP is applied by wrapping each layer in the network. The wrapping is usually applied in a nested way where the full weights are discarded after each forward pass to save memory for use in the next layer. The *auto wrapping* policy is the simplest way to implement this and you don't need to change any code. You should select `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP` to wrap a Transformer layer and `fsdp_transformer_layer_cls_to_wrap` to specify which layer to wrap (for example `BertLayer`).
Otherwise, you can choose a size-based wrapping policy where FSDP is applied to a layer if it exceeds a certain number of parameters. This is enabled by setting `fsdp_wrap_policy: SIZE_BASED_WRAP` and `min_num_param` to the desired size threshold.
### Checkpointing
Intermediate checkpoints should be saved with `fsdp_state_dict_type: SHARDED_STATE_DICT` because saving the full state dict with CPU offloading on rank 0 takes a lot of time and often results in `NCCL Timeout` errors due to indefinite hanging during broadcasting. You can resume training with the sharded state dicts with the [`~accelerate.Accelerator.load_state`]` method.
```py
# directory containing checkpoints
accelerator.load_state("ckpt")
```
However, when training ends, you want to save the full state dict because sharded state dict is only compatible with FSDP.
```py
if trainer.is_fsdp_enabled:
trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")
trainer.save_model(script_args.output_dir)
```
### TPU
[PyTorch XLA](https://pytorch.org/xla/release/2.1/index.html) supports FSDP training for TPUs and it can be enabled by modifying the FSDP configuration file generated by `accelerate config`. In addition to the sharding strategies and wrapping options specified above, you can add the parameters shown below to the file.
```yaml
xla: True # must be set to True to enable PyTorch/XLA
xla_fsdp_settings: # XLA-specific FSDP parameters
xla_fsdp_grad_ckpt: True # use gradient checkpointing
```
The [`xla_fsdp_settings`](https://github.com/pytorch/xla/blob/2e6e183e0724818f137c8135b34ef273dea33318/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py#L128) allow you to configure additional XLA-specific parameters for FSDP.
## Launch training
An example FSDP configuration file may look like:
```yaml
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch_policy: BACKWARD_PRE
fsdp_cpu_ram_efficient_loading: true
fsdp_forward_prefetch: false
fsdp_offload_params: true
fsdp_sharding_strategy: 1
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sync_module_states: true
fsdp_transformer_layer_cls_to_wrap: BertLayer
fsdp_use_orig_params: true
machine_rank: 0
main_training_function: main
mixed_precision: bf16
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
```
To launch training, run the [`accelerate launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) command and it'll automatically use the configuration file you previously created with `accelerate config`.
```bash
accelerate launch my-trainer-script.py
```
```bash
accelerate launch --fsdp="full shard" --fsdp_config="path/to/fsdp_config/ my-trainer-script.py
```
## Next steps
FSDP can be a powerful tool for training really large models and you have access to more than one GPU or TPU. By sharding the model parameters, optimizer and gradient states, and even offloading them to the CPU when they're inactive, FSDP can reduce the high cost of large-scale training. If you're interested in learning more, the following may be helpful:
* Follow along with the more in-depth Accelerate guide for [FSDP](https://huggingface.co/docs/accelerate/usage_guides/fsdp).
* Read the [Introducing PyTorch Fully Sharded Data Parallel (FSDP) API](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) blog post.
* Read the [Scaling PyTorch models on Cloud TPUs with FSDP](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) blog post.
| transformers/docs/source/en/fsdp.md/0 | {
"file_path": "transformers/docs/source/en/fsdp.md",
"repo_id": "transformers",
"token_count": 2239
} | 252 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Best Practices for Generation with Cache
Efficient caching is crucial for optimizing the performance of models in various generative tasks,
including text generation, translation, summarization and other transformer-based applications.
Effective caching helps reduce computation time and improve response rates, especially in real-time or resource-intensive applications.
Transformers support various caching methods, leveraging "Cache" classes to abstract and manage the caching logic.
This document outlines best practices for using these classes to maximize performance and efficiency.
Check out all the available `Cache` classes in the [API documentation](./internal/generation_utils.md).
## What is Cache and why we should care?
Imagine youâre having a conversation with someone, and instead of remembering what was said previously, you have to start from scratch every time you respond. This would be slow and inefficient, right? In the world of Transformer models, a similar concept applies, and that's where Caching keys and values come into play. From now on, I'll refer to the concept as KV Cache.
KV cache is needed to optimize the generation in autoregressive models, where the model predicts text token by token. This process can be slow since the model can generate only one token at a time, and each new prediction is dependent on the previous context. That means, to predict token number 1000 in the generation, you need information from the previous 999 tokens, which comes in the form of some matrix multiplications across the representations of those tokens. But to predict token number 1001, you also need the same information from the first 999 tokens, plus additional information from token number 1000. That is where key-value cache is used to optimize the sequential generation process by storing previous calculations to reuse in subsequent tokens, so they don't need to be computed again.
More concretely, key-value cache acts as a memory bank for these generative models, where the model stores key-value pairs derived from self-attention layers for previously processed tokens. By storing this information, the model can avoid redundant computations and instead retrieve keys and values of previous tokens from the cache.
<details>
<summary><em>For the Curious Minds Who Like to Dive Deep</em></summary>
### Under the Hood: How Cache Object Works in Attention Mechanism
When utilizing a cache object in the input, the Attention module performs several critical steps to integrate past and present information seamlessly.
The Attention module concatenates the current key-values with the past key-values stored in the cache. This results in attention weights of shape `(new_tokens_length, past_kv_length + new_tokens_length)`. Essentially, the past and current key-values are combined to compute attention scores, ensuring that the model considers both previous context and new input. The concatenated key-values are used to compute the attention scores resulting in attention weights of shape `(new_tokens_length, past_kv_length + new_tokens_length)`.
Therefore, when iteratively calling `forward()` instead of the `generate()` method, itâs crucial to ensure that the attention mask shape matches the combined length of past and current key-values. The attention mask should have the shape `(batch_size, past_kv_length + new_tokens_length)`. This is usually handled internally when you call `generate()` method. If you want to implement your own generation loop with Cache classes, take this into consideration and prepare the attention mask to hold values to current and past tokens.
<Tip warning={true}>
One important concept you need to know when writing your own generation loop, is `cache_position`. In case you want to reuse an already filled Cache object by calling `forward()`, you have to pass in a valid `cache_position` which will indicate the positions of inputs in the sequence. Note that `cache_position` is not affected by padding, and always adds one more position for each token. For example, if key/value cache contains 10 tokens (no matter how many of it is a pad token), the cache position for the next token should be `torch.tensor([10])`.
</Tip>
See an example below for how to implement your own generation loop.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda:0")
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
>>> past_key_values = DynamicCache()
>>> messages = [{"role": "user", "content": "Hello, what's your name."}]
>>> inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to("cuda:0")
>>> generated_ids = inputs.input_ids
>>> cache_position = torch.arange(inputs.input_ids.shape[1], dtype=torch.int64, device="cuda:0")
>>> max_new_tokens = 10
>>> for _ in range(max_new_tokens):
... outputs = model(**inputs, cache_position=cache_position, past_key_values=past_key_values, use_cache=True)
... # Greedily sample one next token
... next_token_ids = outputs.logits[:, -1:].argmax(-1)
... generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1)
...
... # Prepare inputs for the next generation step by leaaving unprocessed tokens, in our case we have only one new token
... # and expanding attn mask for the new token, as explained above
... attention_mask = inputs["attention_mask"]
... attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
... inputs = {"input_ids": next_token_ids, "attention_mask": attention_mask}
... cache_position = cache_position[-1:] + 1 # add one more position for the next token
>>> print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
"[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA,"
```
</details>
## Generate with Cache
In ð€ Transformers, we support various Cache types to optimize the performance across different models and tasks. By default, all models generate with caching,
with the [`~DynamicCache`] class being the default cache for most models. It allows us to dynamically grow cache size, by saving more and more keys and values as we generate. If for some reason you don't want to use caches, you can pass `use_cache=False` into the `generate()` method.
Refer to the table below to see the difference between cache types and choose the one that suits best for your use-case.
| Cache Type | Memory Efficient | Supports torch.compile() | Initialization Recommended | Latency | Long Context Generation |
|------------------------|------------------|--------------------------|----------------------------|---------|-------------------------|
| Dynamic Cache | No | No | No | Mid | No |
| Static Cache | No | Yes | Yes | High | No |
| Offloaded Cache | Yes | No | No | Low | Yes |
| Offloaded Static Cache | No | Yes | Yes | High | Yes |
| Quantized Cache | Yes | No | No | Low | Yes |
| Sliding Window Cache | No | Yes | Yes | High | No |
| Sink Cache | Yes | No | Yes | Mid | Yes |
These cache classes can be set with a `cache_implementation` argument when generating. To learn about the available options for the cache_implementation flag, please refer to the [API Documentation](./main_classes/text_generation.md#transformers.GenerationConfig). Now, let's explore each cache type in detail and see how to use them. Note that the below examples are for decoder-only Tranformer-based models. We also support ["Model-Specific Cache"] classes for models such as Mamba or Jamba, keep reading for more details.
### Quantized Cache
The key and value cache can occupy a large portion of memory, becoming a [bottleneck for long-context generation](https://huggingface.co/blog/llama31#inference-memory-requirements), especially for Large Language Models.
Quantizing the cache when using `generate()` can significantly reduce memory requirements at the cost of speed.
KV Cache quantization in `transformers` is largely inspired by the paper ["KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache"](https://arxiv.org/abs/2402.02750) and currently supports [`~QuantoQuantizedCache`] and [`~HQQQuantizedCache`] classes. For more information on the inner workings see the paper.
To enable quantization of the key-value cache, one needs to indicate `cache_implementation="quantized"` in the `generation_config`.
Quantization related arguments should be passed to the `generation_config` either as a `dict` or an instance of a [`~QuantizedCacheConfig`] class.
One has to indicate which quantization backend to use in the [`~QuantizedCacheConfig`], the default is `quanto`.
<Tip warning={true}>
Cache quantization can be detrimental in terms of latency if the context length is short and there is enough GPU VRAM available to run without cache quantization. It is recommended to seek balance between memory efficiency and latency.
</Tip>
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="quantized", cache_config={"nbits": 4, "backend": "quanto"})
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
I like rock music because it's loud and energetic. It's a great way to express myself and rel
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20)
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
I like rock music because it's loud and energetic. I like to listen to it when I'm feeling
```
## Offloaded Cache
Similarly to KV cache quantization, [`~OffloadedCache`] strategy aims to reduce GPU VRAM usage.
It does so by moving the KV cache for most layers to the CPU.
As the model's `forward()` method iterates over the layers, this strategy maintains the current layer cache on the GPU.
At the same time it asynchronously prefetches the next layer cache as well as sending the previous layer cache back to the CPU.
Unlike KV cache quantization, this strategy always produces the same result as the default KV cache implementation.
Thus, it can serve as a drop-in replacement or a fallback for it.
Depending on your model and the characteristics of your generation task (size of context, number of generated tokens, number of beams, etc.)
you may notice a small degradation in generation throughput compared to the default KV cache implementation.
To enable KV cache offloading, pass `cache_implementation="offloaded"` in the `generation_config` or directly to the `generate()` call.
Use `cache_implementation="offloaded_static"` for an offloaded static cache (see also [Offloaded Static Cache](#offloaded-static-cache) below).
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> ckpt = "microsoft/Phi-3-mini-4k-instruct"
>>> tokenizer = AutoTokenizer.from_pretrained(ckpt)
>>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("Fun fact: The shortest", return_tensors="pt").to(model.device)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23, cache_implementation="offloaded")
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896.
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23)
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896.
```
<Tip warning={true}>
Cache offloading requires a GPU and can be slower than dynamic KV cache. Use it if you are getting CUDA out of memory errors.
</Tip>
The example below shows how KV cache offloading can be used as a fallback strategy.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> def resilient_generate(model, *args, **kwargs):
... oom = False
... try:
... return model.generate(*args, **kwargs)
... except torch.cuda.OutOfMemoryError as e:
... print(e)
... print("retrying with cache_implementation='offloaded'")
... oom = True
... if oom:
... torch.cuda.empty_cache()
... kwargs["cache_implementation"] = "offloaded"
... return model.generate(*args, **kwargs)
...
...
>>> ckpt = "microsoft/Phi-3-mini-4k-instruct"
>>> tokenizer = AutoTokenizer.from_pretrained(ckpt)
>>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda:0")
>>> prompt = ["okay "*1000 + "Fun fact: The most"]
>>> inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
>>> beams = { "num_beams": 40, "num_beam_groups": 40, "num_return_sequences": 40, "diversity_penalty": 1.0, "max_new_tokens": 23, "early_stopping": True, }
>>> out = resilient_generate(model, **inputs, **beams)
>>> responses = tokenizer.batch_decode(out[:,-28:], skip_special_tokens=True)
```
On a GPU with 50 GB of RAM, running this code will print
```
CUDA out of memory. Tried to allocate 4.83 GiB. GPU
retrying with cache_implementation='offloaded'
```
before successfully generating 40 beams.
### Static Cache
Since the "DynamicCache" dynamically grows with each generation step, it prevents you from taking advantage of JIT optimizations. The [`~StaticCache`] pre-allocates
a specific maximum size for the keys and values, allowing you to generate up to the maximum length without having to modify cache size. Check the below usage example.
For more examples with Static Cache and JIT compilation, take a look at [StaticCache & torchcompile](./llm_optims.md#static-kv-cache-and-torchcompile)
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
>>> # simply pass the cache implementation="static"
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="static")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
```
## Offloaded Static Cache
Like [`~OffloadedCache`] exists for offloading a "DynamicCache", there is also an offloaded static cache. It fully supports
JIT optimizations. Just pass `cache_implementation="offloaded_static"` in the `generation_config` or directly to the `generate()` call.
This will use the [`~OffloadedStaticCache`] implementation instead.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
>>> # simply pass the cache implementation="static"
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded_static")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
```
### Sliding Window Cache
As the name suggests, this cache type implements a sliding window over previous keys and values, retaining only the last `sliding_window` tokens. It should be used with models like Mistral that support sliding window attention. Additionally, similar to Static Cache, this one is JIT-friendly and can be used with the same compile tecniques as Static Cache.
Note that you can use this cache only for models that support sliding window, e.g. Mistral models.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)
>>> # can be used by passing in cache implementation
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"Yesterday I was on a rock concert and. I was so excited to see my favorite band. I was so excited that I was jumping up and down and screaming. I was so excited that I"
```
### Sink Cache
Sink Cache was introduced in ["Efficient Streaming Language Models with Attention Sinks"](https://arxiv.org/abs/2309.17453). It allows you to generate long sequences of text ("infinite length" according to the paper) without any fine-tuning. That is achieved by smart handling of previous keys and values, specifically it retains a few initial tokens from the sequence, called "sink tokens". This is based on the observation that these initial tokens attract a significant portion of attention scores during the generation process. Tokens that come after "sink tokens" are discarded on a sliding windowed basis, keeping only the latest `window_size` tokens. By keeping these initial tokens as "attention sinks," the model maintains stable performance even when dealing with very long texts, thus discarding most of the previous knowledge.
Unlike other cache classes, this one can't be used directly by indicating a `cache_implementation`. You have to initialize the Cache before calling on `generate()` as follows.
```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
>>> inputs = tokenizer("This is a long story about unicorns, fairies and magic.", return_tensors="pt").to(model.device)
>>> # get our cache, specify number of sink tokens and window size
>>> # Note that window size already includes sink tokens, so has to be larger
>>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, past_key_values=past_key_values)
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
"This is a long story about unicorns, fairies and magic. It is a fantasy world where unicorns and fairies live together in harmony. The story follows a young girl named Lily"
```
### Encoder-Decoder Cache
The [`~EncoderDecoderCache`] is a wrapper designed to handle the caching needs of encoder-decoder models. This cache type is specifically built to manage both self-attention and cross-attention caches, ensuring storage and retrieval of past key/values required for these complex models. Cool thing about Encoder-Decoder Cache is that you can set different cache types for the encoder and for the decoder, depending on your use case. Currently this cache is only supported in [Whisper](./model_doc/whisper.md) models but we will be adding more models soon.
In terms of usage, there is nothing special to be done and calling `generate()` or `forward()` will handle everything for you.
### Model-specific Cache Classes
Some models require storing previous keys, values, or states in a specific way, and the above cache classes cannot be used. For such cases, we have several specialized cache classes that are designed for specific models. These models only accept their own dedicated cache classes and do not support using any other cache types. Some examples include [`~HybridCache`] for [Gemma2](./model_doc/gemma2.md) series models or [`~MambaCache`] for [Mamba](./model_doc/mamba.md) architecture models.
## Iterative Generation with Cache
We have seen how to use each of the cache types when generating. What if you want to use cache in iterative generation setting, for example in applications like chatbots, where interactions involve multiple turns and continuous back-and-forth exchanges. Iterative generation with cache allows these systems to handle ongoing conversations effectively without reprocessing the entire context at each step. But there are some tips that you should know before you start implementing:
The general format when doing iterative generation is as below. First you have to initialize an empty cache of the type you want, and you can start feeding in new prompts iteratively. Keeping track of dialogues history and formatting can be done with chat templates, read more on that in [chat_templating](./chat_templating.md)
In case you are using Sink Cache, you have to crop your inputs to that maximum length because Sink Cache can generate text longer than its maximum window size, but it expects the first input to not exceed the maximum cache length.
```python
>>> import torch
>>> from transformers import AutoTokenizer,AutoModelForCausalLM
>>> from transformers.cache_utils import (
>>> DynamicCache,
>>> SinkCache,
>>> StaticCache,
>>> SlidingWindowCache,
>>> QuantoQuantizedCache,
>>> QuantizedCacheConfig,
>>> )
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map='auto')
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
>>> user_prompts = ["Hello, what's your name?", "Btw, yesterday I was on a rock concert."]
>>> past_key_values = DynamicCache()
>>> max_cache_length = past_key_values.get_max_length()
>>> messages = []
>>> for prompt in user_prompts:
... messages.append({"role": "user", "content": prompt})
... inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
... if isinstance(past_key_values, SinkCache):
... inputs = {k: v[:, -max_cache_length:] for k, v in inputs.items()}
...
... input_length = inputs["input_ids"].shape[1]
...
... outputs = model.generate(**inputs, do_sample=False, max_new_tokens=256, past_key_values=past_key_values)
... completion = tokenizer.decode(outputs[0, input_length: ], skip_special_tokens=True)
... messages.append({"role": "assistant", "content": completion})
print(messages)
[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': " Hello! My name is LLaMA, I'm a large language model trained by a team of researcher at Meta AI. ð"}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': ' Oh, cool! That sounds like a lot of fun! ð Did you enjoy the concert? What was the band like? ð€'}]
```
## Re-use Cache to continue generation
Sometimes you would want to fist fill-in cache object with key/values for certain prefix prompt and re-use it several times to generate different sequences from it. We are working hard on adding this feature to ð€ Transformers and will update this section soon.
| transformers/docs/source/en/kv_cache.md/0 | {
"file_path": "transformers/docs/source/en/kv_cache.md",
"repo_id": "transformers",
"token_count": 7492
} | 253 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BARThez
## Overview
The BARThez model was proposed in [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct,
2020.
The abstract of the paper:
*Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing
(NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language
understanding tasks. While there are some notable exceptions, most of the available models and research have been
conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language
(to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research
that we adapted to suit BART's perturbation schemes. Unlike already existing BERT-based French language models such as
CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also
its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel
summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already
pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez,
provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.*
This model was contributed by [moussakam](https://huggingface.co/moussakam). The Authors' code can be found [here](https://github.com/moussaKam/BARThez).
<Tip>
BARThez implementation is the same as BART, except for tokenization. Refer to [BART documentation](bart) for information on
configuration classes and their parameters. BARThez-specific tokenizers are documented below.
</Tip>
## Resources
- BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check:
[examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md).
## BarthezTokenizer
[[autodoc]] BarthezTokenizer
## BarthezTokenizerFast
[[autodoc]] BarthezTokenizerFast
| transformers/docs/source/en/model_doc/barthez.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/barthez.md",
"repo_id": "transformers",
"token_count": 818
} | 254 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BORT
<Tip warning={true}>
This model is in maintenance mode only, we do not accept any new PRs changing its code.
If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0.
You can do so by running the following command: `pip install -U transformers==4.30.0`.
</Tip>
## Overview
The BORT model was proposed in [Optimal Subarchitecture Extraction for BERT](https://arxiv.org/abs/2010.10499) by
Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the
authors refer to as "Bort".
The abstract from the paper is the following:
*We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by
applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as
"Bort", is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the
original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which
is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large
(Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same
hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the
architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%,
absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.*
This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/alexa/bort/).
## Usage tips
- BORT's model architecture is based on BERT, refer to [BERT's documentation page](bert) for the
model's API reference as well as usage examples.
- BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, refer to [RoBERTa's documentation page](roberta) for the tokenizer's API reference as well as usage examples.
- BORT requires a specific fine-tuning algorithm, called [Agora](https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology) ,
that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the
algorithm to make BORT fine-tuning work.
| transformers/docs/source/en/model_doc/bort.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/bort.md",
"repo_id": "transformers",
"token_count": 867
} | 255 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ConvBERT
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=convbert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/conv-bert-base">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The ConvBERT model was proposed in [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng
Yan.
The abstract from the paper is the following:
*Pre-trained language models like BERT and its variants have recently achieved impressive performance in various
natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers
large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for
generating the attention map from a global perspective, we observe some heads only need to learn local dependencies,
which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to
replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the
rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context
learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that
ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and
fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while
using less than 1/4 training cost. Code and pre-trained models will be released.*
This model was contributed by [abhishek](https://huggingface.co/abhishek). The original implementation can be found
here: https://github.com/yitu-opensource/ConvBert
## Usage tips
ConvBERT training tips are similar to those of BERT. For usage tips refer to [BERT documentation](bert).
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## ConvBertConfig
[[autodoc]] ConvBertConfig
## ConvBertTokenizer
[[autodoc]] ConvBertTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## ConvBertTokenizerFast
[[autodoc]] ConvBertTokenizerFast
<frameworkcontent>
<pt>
## ConvBertModel
[[autodoc]] ConvBertModel
- forward
## ConvBertForMaskedLM
[[autodoc]] ConvBertForMaskedLM
- forward
## ConvBertForSequenceClassification
[[autodoc]] ConvBertForSequenceClassification
- forward
## ConvBertForMultipleChoice
[[autodoc]] ConvBertForMultipleChoice
- forward
## ConvBertForTokenClassification
[[autodoc]] ConvBertForTokenClassification
- forward
## ConvBertForQuestionAnswering
[[autodoc]] ConvBertForQuestionAnswering
- forward
</pt>
<tf>
## TFConvBertModel
[[autodoc]] TFConvBertModel
- call
## TFConvBertForMaskedLM
[[autodoc]] TFConvBertForMaskedLM
- call
## TFConvBertForSequenceClassification
[[autodoc]] TFConvBertForSequenceClassification
- call
## TFConvBertForMultipleChoice
[[autodoc]] TFConvBertForMultipleChoice
- call
## TFConvBertForTokenClassification
[[autodoc]] TFConvBertForTokenClassification
- call
## TFConvBertForQuestionAnswering
[[autodoc]] TFConvBertForQuestionAnswering
- call
</tf>
</frameworkcontent>
| transformers/docs/source/en/model_doc/convbert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/convbert.md",
"repo_id": "transformers",
"token_count": 1393
} | 256 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Encoder Decoder Models
## Overview
The [`EncoderDecoderModel`] can be used to initialize a sequence-to-sequence model with any
pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks
was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by
Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
After such an [`EncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like
any other models (see the examples for more information).
An application of this architecture could be to leverage two pretrained [`BertModel`] as the encoder
and decoder for a summarization model as was shown in: [Text Summarization with Pretrained Encoders](https://arxiv.org/abs/1908.08345) by Yang Liu and Mirella Lapata.
## Randomly initializing `EncoderDecoderModel` from model configurations.
[`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder.
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> model = EncoderDecoderModel(config=config)
```
## Initialising `EncoderDecoderModel` from a pretrained encoder and a pretrained decoder.
[`EncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained auto-encoding model, *e.g.* BERT, can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
Initializing [`EncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
To do so, the `EncoderDecoderModel` class provides a [`EncoderDecoderModel.from_encoder_decoder_pretrained`] method.
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
```
## Loading an existing `EncoderDecoderModel` checkpoint and perform inference.
To load fine-tuned checkpoints of the `EncoderDecoderModel` class, [`EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.
To perform inference, one uses the [`generate`] method, which allows to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling.
```python
>>> from transformers import AutoTokenizer, EncoderDecoderModel
>>> # load a fine-tuned seq2seq model and corresponding tokenizer
>>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
>>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
>>> # let's perform inference on a long piece of text
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> input_ids = tokenizer(ARTICLE_TO_SUMMARIZE, return_tensors="pt").input_ids
>>> # autoregressively generate summary (uses greedy decoding by default)
>>> generated_ids = model.generate(input_ids)
>>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
nearly 800 thousand customers were affected by the shutoffs. the aim is to reduce the risk of wildfires. nearly 800, 000 customers were expected to be affected by high winds amid dry conditions. pg & e said it scheduled the blackouts to last through at least midday tomorrow.
```
## Loading a PyTorch checkpoint into `TFEncoderDecoderModel`.
[`TFEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a
pytorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only pytorch
checkpoints for a particular encoder-decoder model, a workaround is:
```python
>>> # a workaround to load from pytorch checkpoint
>>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel
>>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
>>> _model.encoder.save_pretrained("./encoder")
>>> _model.decoder.save_pretrained("./decoder")
>>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
... )
>>> # This is only for copying some specific attributes of this particular model.
>>> model.config = _model.config
```
## Training
Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_ids` (which are the
`input_ids` of the encoded input sequence) and `labels` (which are the `input_ids` of the encoded
target sequence).
```python
>>> from transformers import BertTokenizer, EncoderDecoderModel
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased")
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> input_ids = tokenizer(
... "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft).Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.",
... return_tensors="pt",
... ).input_ids
>>> labels = tokenizer(
... "the eiffel tower surpassed the washington monument to become the tallest structure in the world. it was the first structure to reach a height of 300 metres in paris in 1930. it is now taller than the chrysler building by 5. 2 metres ( 17 ft ) and is the second tallest free - standing structure in paris.",
... return_tensors="pt",
... ).input_ids
>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_ids=input_ids, labels=labels).loss
```
Detailed [colab](https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing#scrollTo=ZwQIEhKOrJpl) for training.
This model was contributed by [thomwolf](https://github.com/thomwolf). This model's TensorFlow and Flax versions
were contributed by [ydshieh](https://github.com/ydshieh).
## EncoderDecoderConfig
[[autodoc]] EncoderDecoderConfig
<frameworkcontent>
<pt>
## EncoderDecoderModel
[[autodoc]] EncoderDecoderModel
- forward
- from_encoder_decoder_pretrained
</pt>
<tf>
## TFEncoderDecoderModel
[[autodoc]] TFEncoderDecoderModel
- call
- from_encoder_decoder_pretrained
</tf>
<jax>
## FlaxEncoderDecoderModel
[[autodoc]] FlaxEncoderDecoderModel
- __call__
- from_encoder_decoder_pretrained
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/encoder-decoder.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/encoder-decoder.md",
"repo_id": "transformers",
"token_count": 2664
} | 257 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Gemma
## Overview
The Gemma model was proposed in [Gemma: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/gemma-open-models/) by Gemma Team, Google.
Gemma models are trained on 6T tokens, and released with 2 versions, 2b and 7b.
The abstract from the paper is the following:
*This work introduces Gemma, a new family of open language models demonstrating strong performance across academic benchmarks for language understanding, reasoning, and safety. We release two sizes of models (2 billion and 7 billion parameters), and provide both pretrained and fine-tuned checkpoints. Gemma outperforms similarly sized open models on 11 out of 18 text-based tasks, and we present comprehensive evaluations of safety and responsibility aspects of the models, alongside a detailed description of our model development. We believe the responsible release of LLMs is critical for improving the safety of frontier models, and for enabling the next wave of LLM innovations*
Tips:
- The original checkpoints can be converted using the conversion script `src/transformers/models/gemma/convert_gemma_weights_to_hf.py`
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Younes Belkada](https://huggingface.co/ybelkada), [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi), [Pedro Cuenca](https://huggingface.co/pcuenq).
## GemmaConfig
[[autodoc]] GemmaConfig
## GemmaTokenizer
[[autodoc]] GemmaTokenizer
## GemmaTokenizerFast
[[autodoc]] GemmaTokenizerFast
## GemmaModel
[[autodoc]] GemmaModel
- forward
## GemmaForCausalLM
[[autodoc]] GemmaForCausalLM
- forward
## GemmaForSequenceClassification
[[autodoc]] GemmaForSequenceClassification
- forward
## GemmaForTokenClassification
[[autodoc]] GemmaForTokenClassification
- forward
## FlaxGemmaModel
[[autodoc]] FlaxGemmaModel
- __call__
## FlaxGemmaForCausalLM
[[autodoc]] FlaxGemmaForCausalLM
- __call__
| transformers/docs/source/en/model_doc/gemma.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/gemma.md",
"repo_id": "transformers",
"token_count": 764
} | 258 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# HerBERT
## Overview
The HerBERT model was proposed in [KLEJ: Comprehensive Benchmark for Polish Language Understanding](https://www.aclweb.org/anthology/2020.acl-main.111.pdf) by Piotr Rybak, Robert Mroczkowski, Janusz Tracz, and
Ireneusz Gawlik. It is a BERT-based Language Model trained on Polish Corpora using only MLM objective with dynamic
masking of whole words.
The abstract from the paper is the following:
*In recent years, a series of Transformer-based models unlocked major improvements in general natural language
understanding (NLU) tasks. Such a fast pace of research would not be possible without general NLU benchmarks, which
allow for a fair comparison of the proposed methods. However, such benchmarks are available only for a handful of
languages. To alleviate this issue, we introduce a comprehensive multi-task benchmark for the Polish language
understanding, accompanied by an online leaderboard. It consists of a diverse set of tasks, adopted from existing
datasets for named entity recognition, question-answering, textual entailment, and others. We also introduce a new
sentiment analysis task for the e-commerce domain, named Allegro Reviews (AR). To ensure a common evaluation scheme and
promote models that generalize to different NLU tasks, the benchmark includes datasets from varying domains and
applications. Additionally, we release HerBERT, a Transformer-based model trained specifically for the Polish language,
which has the best average performance and obtains the best results for three out of nine tasks. Finally, we provide an
extensive evaluation, including several standard baselines and recently proposed, multilingual Transformer-based
models.*
This model was contributed by [rmroczkowski](https://huggingface.co/rmroczkowski). The original code can be found
[here](https://github.com/allegro/HerBERT).
## Usage example
```python
>>> from transformers import HerbertTokenizer, RobertaModel
>>> tokenizer = HerbertTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
>>> model = RobertaModel.from_pretrained("allegro/herbert-klej-cased-v1")
>>> encoded_input = tokenizer.encode("Kto ma lepszÄ
sztukÄ, ma lepszy rzÄ
d â to jasne.", return_tensors="pt")
>>> outputs = model(encoded_input)
>>> # HerBERT can also be loaded using AutoTokenizer and AutoModel:
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("allegro/herbert-klej-cased-tokenizer-v1")
>>> model = AutoModel.from_pretrained("allegro/herbert-klej-cased-v1")
```
<Tip>
Herbert implementation is the same as `BERT` except for the tokenization method. Refer to [BERT documentation](bert)
for API reference and examples.
</Tip>
## HerbertTokenizer
[[autodoc]] HerbertTokenizer
## HerbertTokenizerFast
[[autodoc]] HerbertTokenizerFast
| transformers/docs/source/en/model_doc/herbert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/herbert.md",
"repo_id": "transformers",
"token_count": 956
} | 259 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# MADLAD-400
## Overview
MADLAD-400 models were released in the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](MADLAD-400: A Multilingual And Document-Level Large Audited Dataset).
The abstract from the paper is the following:
*We introduce MADLAD-400, a manually audited, general domain 3T token monolingual dataset based on CommonCrawl, spanning 419 languages. We discuss
the limitations revealed by self-auditing MADLAD-400, and the role data auditing
had in the dataset creation process. We then train and release a 10.7B-parameter
multilingual machine translation model on 250 billion tokens covering over 450
languages using publicly available data, and find that it is competitive with models
that are significantly larger, and report the results on different domains. In addition, we train a 8B-parameter language model, and assess the results on few-shot
translation. We make the baseline models 1
available to the research community.*
This model was added by [Juarez Bochi](https://huggingface.co/jbochi). The original checkpoints can be found [here](https://github.com/google-research/google-research/tree/master/madlad_400).
This is a machine translation model that supports many low-resource languages, and that is competitive with models that are significantly larger.
One can directly use MADLAD-400 weights without finetuning the model:
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/madlad400-3b-mt")
>>> tokenizer = AutoTokenizer.from_pretrained("google/madlad400-3b-mt")
>>> inputs = tokenizer("<2pt> I love pizza!", return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['Eu amo pizza!']
```
Google has released the following variants:
- [google/madlad400-3b-mt](https://huggingface.co/google/madlad400-3b-mt)
- [google/madlad400-7b-mt](https://huggingface.co/google/madlad400-7b-mt)
- [google/madlad400-7b-mt-bt](https://huggingface.co/google/madlad400-7b-mt-bt)
- [google/madlad400-10b-mt](https://huggingface.co/google/madlad400-10b-mt)
The original checkpoints can be found [here](https://github.com/google-research/google-research/tree/master/madlad_400).
<Tip>
Refer to [T5's documentation page](t5) for all API references, code examples, and notebooks. For more details regarding training and evaluation of the MADLAD-400, refer to the model card.
</Tip>
| transformers/docs/source/en/model_doc/madlad-400.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/madlad-400.md",
"repo_id": "transformers",
"token_count": 930
} | 260 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# mLUKE
## Overview
The mLUKE model was proposed in [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. It's a multilingual extension
of the [LUKE model](https://arxiv.org/abs/2010.01057) trained on the basis of XLM-RoBERTa.
It is based on XLM-RoBERTa and adds entity embeddings, which helps improve performance on various downstream tasks
involving reasoning about entities such as named entity recognition, extractive question answering, relation
classification, cloze-style knowledge completion.
The abstract from the paper is the following:
*Recent studies have shown that multilingual pretrained language models can be effectively improved with cross-lingual
alignment information from Wikipedia entities. However, existing methods only exploit entity information in pretraining
and do not explicitly use entities in downstream tasks. In this study, we explore the effectiveness of leveraging
entity representations for downstream cross-lingual tasks. We train a multilingual language model with 24 languages
with entity representations and show the model consistently outperforms word-based pretrained models in various
cross-lingual transfer tasks. We also analyze the model and the key insight is that incorporating entity
representations into the input allows us to extract more language-agnostic features. We also evaluate the model with a
multilingual cloze prompt task with the mLAMA dataset. We show that entity-based prompt elicits correct factual
knowledge more likely than using only word representations.*
This model was contributed by [ryo0634](https://huggingface.co/ryo0634). The original code can be found [here](https://github.com/studio-ousia/luke).
## Usage tips
One can directly plug in the weights of mLUKE into a LUKE model, like so:
```python
from transformers import LukeModel
model = LukeModel.from_pretrained("studio-ousia/mluke-base")
```
Note that mLUKE has its own tokenizer, [`MLukeTokenizer`]. You can initialize it as follows:
```python
from transformers import MLukeTokenizer
tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
```
<Tip>
As mLUKE's architecture is equivalent to that of LUKE, one can refer to [LUKE's documentation page](luke) for all
tips, code examples and notebooks.
</Tip>
## MLukeTokenizer
[[autodoc]] MLukeTokenizer
- __call__
- save_vocabulary
| transformers/docs/source/en/model_doc/mluke.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/mluke.md",
"repo_id": "transformers",
"token_count": 825
} | 261 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Nezha
<Tip warning={true}>
This model is in maintenance mode only, we don't accept any new PRs changing its code.
If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2.
You can do so by running the following command: `pip install -U transformers==4.40.2`.
</Tip>
## Overview
The Nezha model was proposed in [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei et al.
The abstract from the paper is the following:
*The pre-trained language models have achieved great successes in various natural language understanding (NLU) tasks
due to its capacity to capture the deep contextualized information in text by pre-training on large-scale corpora.
In this technical report, we present our practice of pre-training language models named NEZHA (NEural contextualiZed
representation for CHinese lAnguage understanding) on Chinese corpora and finetuning for the Chinese NLU tasks.
The current version of NEZHA is based on BERT with a collection of proven improvements, which include Functional
Relative Positional Encoding as an effective positional encoding scheme, Whole Word Masking strategy,
Mixed Precision Training and the LAMB Optimizer in training the models. The experimental results show that NEZHA
achieves the state-of-the-art performances when finetuned on several representative Chinese tasks, including
named entity recognition (People's Daily NER), sentence matching (LCQMC), Chinese sentiment classification (ChnSenti)
and natural language inference (XNLI).*
This model was contributed by [sijunhe](https://huggingface.co/sijunhe). The original code can be found [here](https://github.com/huawei-noah/Pretrained-Language-Model/tree/master/NEZHA-PyTorch).
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## NezhaConfig
[[autodoc]] NezhaConfig
## NezhaModel
[[autodoc]] NezhaModel
- forward
## NezhaForPreTraining
[[autodoc]] NezhaForPreTraining
- forward
## NezhaForMaskedLM
[[autodoc]] NezhaForMaskedLM
- forward
## NezhaForNextSentencePrediction
[[autodoc]] NezhaForNextSentencePrediction
- forward
## NezhaForSequenceClassification
[[autodoc]] NezhaForSequenceClassification
- forward
## NezhaForMultipleChoice
[[autodoc]] NezhaForMultipleChoice
- forward
## NezhaForTokenClassification
[[autodoc]] NezhaForTokenClassification
- forward
## NezhaForQuestionAnswering
[[autodoc]] NezhaForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/nezha.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/nezha.md",
"repo_id": "transformers",
"token_count": 996
} | 262 |
<!--Copyright 2024 The Qwen Team and The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Qwen2MoE
## Overview
Qwen2MoE is the new model series of large language models from the Qwen team. Previously, we released the Qwen series, including Qwen-72B, Qwen-1.8B, Qwen-VL, Qwen-Audio, etc.
### Model Details
Qwen2MoE is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. Qwen2MoE has the following architectural choices:
- Qwen2MoE is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes.
- Qwen2MoE employs Mixture of Experts (MoE) architecture, where the models are upcycled from dense language models. For instance, `Qwen1.5-MoE-A2.7B` is upcycled from `Qwen-1.8B`. It has 14.3B parameters in total and 2.7B activated parameters during runtime, while it achieves comparable performance with `Qwen1.5-7B`, with only 25% of the training resources.
For more details refer to the [release blog post](https://qwenlm.github.io/blog/qwen-moe/).
## Usage tips
`Qwen1.5-MoE-A2.7B` and `Qwen1.5-MoE-A2.7B-Chat` can be found on the [Huggingface Hub](https://huggingface.co/Qwen)
In the following, we demonstrate how to use `Qwen1.5-MoE-A2.7B-Chat` for the inference. Note that we have used the ChatML format for dialog, in this demo we show how to leverage `apply_chat_template` for this purpose.
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> device = "cuda" # the device to load the model onto
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B-Chat", device_map="auto")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-MoE-A2.7B-Chat")
>>> prompt = "Give me a short introduction to large language model."
>>> messages = [{"role": "user", "content": prompt}]
>>> text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
>>> model_inputs = tokenizer([text], return_tensors="pt").to(device)
>>> generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512, do_sample=True)
>>> generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
>>> response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
## Qwen2MoeConfig
[[autodoc]] Qwen2MoeConfig
## Qwen2MoeModel
[[autodoc]] Qwen2MoeModel
- forward
## Qwen2MoeForCausalLM
[[autodoc]] Qwen2MoeForCausalLM
- forward
## Qwen2MoeForSequenceClassification
[[autodoc]] Qwen2MoeForSequenceClassification
- forward
## Qwen2MoeForTokenClassification
[[autodoc]] Qwen2MoeForTokenClassification
- forward
| transformers/docs/source/en/model_doc/qwen2_moe.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/qwen2_moe.md",
"repo_id": "transformers",
"token_count": 1111
} | 263 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# SAM
## Overview
SAM (Segment Anything Model) was proposed in [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
The model can be used to predict segmentation masks of any object of interest given an input image.

The abstract from the paper is the following:
*We introduce the Segment Anything (SA) project: a new task, model, and dataset for image segmentation. Using our efficient model in a data collection loop, we built the largest segmentation dataset to date (by far), with over 1 billion masks on 11M licensed and privacy respecting images. The model is designed and trained to be promptable, so it can transfer zero-shot to new image distributions and tasks. We evaluate its capabilities on numerous tasks and find that its zero-shot performance is impressive -- often competitive with or even superior to prior fully supervised results. We are releasing the Segment Anything Model (SAM) and corresponding dataset (SA-1B) of 1B masks and 11M images at [https://segment-anything.com](https://segment-anything.com) to foster research into foundation models for computer vision.*
Tips:
- The model predicts binary masks that states the presence or not of the object of interest given an image.
- The model predicts much better results if input 2D points and/or input bounding boxes are provided
- You can prompt multiple points for the same image, and predict a single mask.
- Fine-tuning the model is not supported yet
- According to the paper, textual input should be also supported. However, at this time of writing this seems not to be supported according to [the official repository](https://github.com/facebookresearch/segment-anything/issues/4#issuecomment-1497626844).
This model was contributed by [ybelkada](https://huggingface.co/ybelkada) and [ArthurZ](https://huggingface.co/ArthurZ).
The original code can be found [here](https://github.com/facebookresearch/segment-anything).
Below is an example on how to run mask generation given an image and a 2D point:
```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]] # 2D location of a window in the image
inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model(**inputs)
masks = processor.image_processor.post_process_masks(
outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores
```
You can also process your own masks alongside the input images in the processor to be passed to the model.
```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor
device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")
img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
mask_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
segmentation_map = Image.open(requests.get(mask_url, stream=True).raw).convert("1")
input_points = [[[450, 600]]] # 2D location of a window in the image
inputs = processor(raw_image, input_points=input_points, segmentation_maps=segmentation_map, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model(**inputs)
masks = processor.image_processor.post_process_masks(
outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores
```
## Resources
A list of official Hugging Face and community (indicated by ð) resources to help you get started with SAM.
- [Demo notebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb) for using the model.
- [Demo notebook](https://github.com/huggingface/notebooks/blob/main/examples/automatic_mask_generation.ipynb) for using the automatic mask generation pipeline.
- [Demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Run_inference_with_MedSAM_using_HuggingFace_Transformers.ipynb) for inference with MedSAM, a fine-tuned version of SAM on the medical domain. ð
- [Demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb) for fine-tuning the model on custom data. ð
## SlimSAM
SlimSAM, a pruned version of SAM, was proposed in [0.1% Data Makes Segment Anything Slim](https://arxiv.org/abs/2312.05284) by Zigeng Chen et al. SlimSAM reduces the size of the SAM models considerably while maintaining the same performance.
Checkpoints can be found on the [hub](https://huggingface.co/models?other=slimsam), and they can be used as a drop-in replacement of SAM.
## Grounded SAM
One can combine [Grounding DINO](grounding-dino) with SAM for text-based mask generation as introduced in [Grounded SAM: Assembling Open-World Models for Diverse Visual Tasks](https://arxiv.org/abs/2401.14159). You can refer to this [demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Grounding%20DINO/GroundingDINO_with_Segment_Anything.ipynb) ð for details.
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/grounded_sam.png"
alt="drawing" width="900"/>
<small> Grounded SAM overview. Taken from the <a href="https://github.com/IDEA-Research/Grounded-Segment-Anything">original repository</a>. </small>
## SamConfig
[[autodoc]] SamConfig
## SamVisionConfig
[[autodoc]] SamVisionConfig
## SamMaskDecoderConfig
[[autodoc]] SamMaskDecoderConfig
## SamPromptEncoderConfig
[[autodoc]] SamPromptEncoderConfig
## SamProcessor
[[autodoc]] SamProcessor
## SamImageProcessor
[[autodoc]] SamImageProcessor
## SamModel
[[autodoc]] SamModel
- forward
## TFSamModel
[[autodoc]] TFSamModel
- call
| transformers/docs/source/en/model_doc/sam.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/sam.md",
"repo_id": "transformers",
"token_count": 2257
} | 264 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the MIT License; you may not use this file except in compliance with
the License.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# SuperPoint
## Overview
The SuperPoint model was proposed
in [SuperPoint: Self-Supervised Interest Point Detection and Description](https://arxiv.org/abs/1712.07629) by Daniel
DeTone, Tomasz Malisiewicz and Andrew Rabinovich.
This model is the result of a self-supervised training of a fully-convolutional network for interest point detection and
description. The model is able to detect interest points that are repeatable under homographic transformations and
provide a descriptor for each point. The use of the model in its own is limited, but it can be used as a feature
extractor for other tasks such as homography estimation, image matching, etc.
The abstract from the paper is the following:
*This paper presents a self-supervised framework for training interest point detectors and descriptors suitable for a
large number of multiple-view geometry problems in computer vision. As opposed to patch-based neural networks, our
fully-convolutional model operates on full-sized images and jointly computes pixel-level interest point locations and
associated descriptors in one forward pass. We introduce Homographic Adaptation, a multi-scale, multi-homography
approach for boosting interest point detection repeatability and performing cross-domain adaptation (e.g.,
synthetic-to-real). Our model, when trained on the MS-COCO generic image dataset using Homographic Adaptation, is able
to repeatedly detect a much richer set of interest points than the initial pre-adapted deep model and any other
traditional corner detector. The final system gives rise to state-of-the-art homography estimation results on HPatches
when compared to LIFT, SIFT and ORB.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/superpoint_architecture.png"
alt="drawing" width="500"/>
<small> SuperPoint overview. Taken from the <a href="https://arxiv.org/abs/1712.07629v4">original paper.</a> </small>
## Usage tips
Here is a quick example of using the model to detect interest points in an image:
```python
from transformers import AutoImageProcessor, SuperPointForKeypointDetection
import torch
from PIL import Image
import requests
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
inputs = processor(image, return_tensors="pt")
outputs = model(**inputs)
```
The outputs contain the list of keypoint coordinates with their respective score and description (a 256-long vector).
You can also feed multiple images to the model. Due to the nature of SuperPoint, to output a dynamic number of keypoints,
you will need to use the mask attribute to retrieve the respective information :
```python
from transformers import AutoImageProcessor, SuperPointForKeypointDetection
import torch
from PIL import Image
import requests
url_image_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_1 = Image.open(requests.get(url_image_1, stream=True).raw)
url_image_2 = "http://images.cocodataset.org/test-stuff2017/000000000568.jpg"
image_2 = Image.open(requests.get(url_image_2, stream=True).raw)
images = [image_1, image_2]
processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
inputs = processor(images, return_tensors="pt")
outputs = model(**inputs)
for i in range(len(images)):
image_mask = outputs.mask[i]
image_indices = torch.nonzero(image_mask).squeeze()
image_keypoints = outputs.keypoints[i][image_indices]
image_scores = outputs.scores[i][image_indices]
image_descriptors = outputs.descriptors[i][image_indices]
```
You can then print the keypoints on the image to visualize the result :
```python
import cv2
for keypoint, score in zip(image_keypoints, image_scores):
keypoint_x, keypoint_y = int(keypoint[0].item()), int(keypoint[1].item())
color = tuple([score.item() * 255] * 3)
image = cv2.circle(image, (keypoint_x, keypoint_y), 2, color)
cv2.imwrite("output_image.png", image)
```
This model was contributed by [stevenbucaille](https://huggingface.co/stevenbucaille).
The original code can be found [here](https://github.com/magicleap/SuperPointPretrainedNetwork).
## Resources
A list of official Hugging Face and community (indicated by ð) resources to help you get started with SuperPoint. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
- A notebook showcasing inference and visualization with SuperPoint can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SuperPoint/Inference_with_SuperPoint_to_detect_interest_points_in_an_image.ipynb). ð
## SuperPointConfig
[[autodoc]] SuperPointConfig
## SuperPointImageProcessor
[[autodoc]] SuperPointImageProcessor
- preprocess
## SuperPointForKeypointDetection
[[autodoc]] SuperPointForKeypointDetection
- forward
| transformers/docs/source/en/model_doc/superpoint.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/superpoint.md",
"repo_id": "transformers",
"token_count": 1654
} | 265 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# TVLT
<Tip warning={true}>
This model is in maintenance mode only, we don't accept any new PRs changing its code.
If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2.
You can do so by running the following command: `pip install -U transformers==4.40.2`.
</Tip>
## Overview
The TVLT model was proposed in [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156)
by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal (the first three authors contributed equally). The Textless Vision-Language Transformer (TVLT) is a model that uses raw visual and audio inputs for vision-and-language representation learning, without using text-specific modules such as tokenization or automatic speech recognition (ASR). It can perform various audiovisual and vision-language tasks like retrieval, question answering, etc.
The abstract from the paper is the following:
*In this work, we present the Textless Vision-Language Transformer (TVLT), where homogeneous transformer blocks take raw visual and audio inputs for vision-and-language representation learning with minimal modality-specific design, and do not use text-specific modules such as tokenization or automatic speech recognition (ASR). TVLT is trained by reconstructing masked patches of continuous video frames and audio spectrograms (masked autoencoding) and contrastive modeling to align video and audio. TVLT attains performance comparable to its text-based counterpart on various multimodal tasks, such as visual question answering, image retrieval, video retrieval, and multimodal sentiment analysis, with 28x faster inference speed and only 1/3 of the parameters. Our findings suggest the possibility of learning compact and efficient visual-linguistic representations from low-level visual and audio signals without assuming the prior existence of text.*
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/tvlt_architecture.png"
alt="drawing" width="600"/>
</p>
<small> TVLT architecture. Taken from the <a href="[https://arxiv.org/abs/2102.03334](https://arxiv.org/abs/2209.14156)">original paper</a>. </small>
The original code can be found [here](https://github.com/zinengtang/TVLT). This model was contributed by [Zineng Tang](https://huggingface.co/ZinengTang).
## Usage tips
- TVLT is a model that takes both `pixel_values` and `audio_values` as input. One can use [`TvltProcessor`] to prepare data for the model.
This processor wraps an image processor (for the image/video modality) and an audio feature extractor (for the audio modality) into one.
- TVLT is trained with images/videos and audios of various sizes: the authors resize and crop the input images/videos to 224 and limit the length of audio spectrogram to 2048. To make batching of videos and audios possible, the authors use a `pixel_mask` that indicates which pixels are real/padding and `audio_mask` that indicates which audio values are real/padding.
- The design of TVLT is very similar to that of a standard Vision Transformer (ViT) and masked autoencoder (MAE) as in [ViTMAE](vitmae). The difference is that the model includes embedding layers for the audio modality.
- The PyTorch version of this model is only available in torch 1.10 and higher.
## TvltConfig
[[autodoc]] TvltConfig
## TvltProcessor
[[autodoc]] TvltProcessor
- __call__
## TvltImageProcessor
[[autodoc]] TvltImageProcessor
- preprocess
## TvltFeatureExtractor
[[autodoc]] TvltFeatureExtractor
- __call__
## TvltModel
[[autodoc]] TvltModel
- forward
## TvltForPreTraining
[[autodoc]] TvltForPreTraining
- forward
## TvltForAudioVisualClassification
[[autodoc]] TvltForAudioVisualClassification
- forward
| transformers/docs/source/en/model_doc/tvlt.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/tvlt.md",
"repo_id": "transformers",
"token_count": 1236
} | 266 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# VisualBERT
## Overview
The VisualBERT model was proposed in [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
VisualBERT is a neural network trained on a variety of (image, text) pairs.
The abstract from the paper is the following:
*We propose VisualBERT, a simple and flexible framework for modeling a broad range of vision-and-language tasks.
VisualBERT consists of a stack of Transformer layers that implicitly align elements of an input text and regions in an
associated input image with self-attention. We further propose two visually-grounded language model objectives for
pre-training VisualBERT on image caption data. Experiments on four vision-and-language tasks including VQA, VCR, NLVR2,
and Flickr30K show that VisualBERT outperforms or rivals with state-of-the-art models while being significantly
simpler. Further analysis demonstrates that VisualBERT can ground elements of language to image regions without any
explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between
verbs and image regions corresponding to their arguments.*
This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/uclanlp/visualbert).
## Usage tips
1. Most of the checkpoints provided work with the [`VisualBertForPreTraining`] configuration. Other
checkpoints provided are the fine-tuned checkpoints for down-stream tasks - VQA ('visualbert-vqa'), VCR
('visualbert-vcr'), NLVR2 ('visualbert-nlvr2'). Hence, if you are not working on these downstream tasks, it is
recommended that you use the pretrained checkpoints.
2. For the VCR task, the authors use a fine-tuned detector for generating visual embeddings, for all the checkpoints.
We do not provide the detector and its weights as a part of the package, but it will be available in the research
projects, and the states can be loaded directly into the detector provided.
VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice,
visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare
embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical
dimension.
To feed images to the model, each image is passed through a pre-trained object detector and the regions and the
bounding boxes are extracted. The authors use the features generated after passing these regions through a pre-trained
CNN like ResNet as visual embeddings. They also add absolute position embeddings, and feed the resulting sequence of
vectors to a standard BERT model. The text input is concatenated in the front of the visual embeddings in the embedding
layer, and is expected to be bound by [CLS] and a [SEP] tokens, as in BERT. The segment IDs must also be set
appropriately for the textual and visual parts.
The [`BertTokenizer`] is used to encode the text. A custom detector/image processor must be used
to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:
- [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers/tree/main/examples/research_projects/visual_bert) : This notebook
contains an example on VisualBERT VQA.
- [Generate Embeddings for VisualBERT (Colab Notebook)](https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing) : This notebook contains
an example on how to generate visual embeddings.
The following example shows how to get the last hidden state using [`VisualBertModel`]:
```python
>>> import torch
>>> from transformers import BertTokenizer, VisualBertModel
>>> model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> inputs = tokenizer("What is the man eating?", return_tensors="pt")
>>> # this is a custom function that returns the visual embeddings given the image path
>>> visual_embeds = get_visual_embeddings(image_path)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update(
... {
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask,
... }
... )
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```
## VisualBertConfig
[[autodoc]] VisualBertConfig
## VisualBertModel
[[autodoc]] VisualBertModel
- forward
## VisualBertForPreTraining
[[autodoc]] VisualBertForPreTraining
- forward
## VisualBertForQuestionAnswering
[[autodoc]] VisualBertForQuestionAnswering
- forward
## VisualBertForMultipleChoice
[[autodoc]] VisualBertForMultipleChoice
- forward
## VisualBertForVisualReasoning
[[autodoc]] VisualBertForVisualReasoning
- forward
## VisualBertForRegionToPhraseAlignment
[[autodoc]] VisualBertForRegionToPhraseAlignment
- forward
| transformers/docs/source/en/model_doc/visual_bert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/visual_bert.md",
"repo_id": "transformers",
"token_count": 1680
} | 267 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# XGLM
## Overview
The XGLM model was proposed in [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668)
by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal,
Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo,
Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
The abstract from the paper is the following:
*Large-scale autoregressive language models such as GPT-3 are few-shot learners that can perform a wide range of language
tasks without fine-tuning. While these models are known to be able to jointly represent many different languages,
their training data is dominated by English, potentially limiting their cross-lingual generalization.
In this work, we train multilingual autoregressive language models on a balanced corpus covering a diverse set of languages,
and study their few- and zero-shot learning capabilities in a wide range of tasks. Our largest model with 7.5 billion parameters
sets new state of the art in few-shot learning in more than 20 representative languages, outperforming GPT-3 of comparable size
in multilingual commonsense reasoning (with +7.4% absolute accuracy improvement in 0-shot settings and +9.4% in 4-shot settings)
and natural language inference (+5.4% in each of 0-shot and 4-shot settings). On the FLORES-101 machine translation benchmark,
our model outperforms GPT-3 on 171 out of 182 translation directions with 32 training examples, while surpassing the
official supervised baseline in 45 directions. We present a detailed analysis of where the model succeeds and fails,
showing in particular that it enables cross-lingual in-context learning on some tasks, while there is still room for improvement
on surface form robustness and adaptation to tasks that do not have a natural cloze form. Finally, we evaluate our models
in social value tasks such as hate speech detection in five languages and find it has limitations similar to comparable sized GPT-3 models.*
This model was contributed by [Suraj](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/xglm).
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
## XGLMConfig
[[autodoc]] XGLMConfig
## XGLMTokenizer
[[autodoc]] XGLMTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## XGLMTokenizerFast
[[autodoc]] XGLMTokenizerFast
<frameworkcontent>
<pt>
## XGLMModel
[[autodoc]] XGLMModel
- forward
## XGLMForCausalLM
[[autodoc]] XGLMForCausalLM
- forward
</pt>
<tf>
## TFXGLMModel
[[autodoc]] TFXGLMModel
- call
## TFXGLMForCausalLM
[[autodoc]] TFXGLMForCausalLM
- call
</tf>
<jax>
## FlaxXGLMModel
[[autodoc]] FlaxXGLMModel
- __call__
## FlaxXGLMForCausalLM
[[autodoc]] FlaxXGLMForCausalLM
- __call__
</jax>
</frameworkcontent> | transformers/docs/source/en/model_doc/xglm.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/xglm.md",
"repo_id": "transformers",
"token_count": 1137
} | 268 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Multilingual models for inference
[[open-in-colab]]
There are several multilingual models in ð€ Transformers, and their inference usage differs from monolingual models. Not *all* multilingual model usage is different though. Some models, like [google-bert/bert-base-multilingual-uncased](https://huggingface.co/google-bert/bert-base-multilingual-uncased), can be used just like a monolingual model. This guide will show you how to use multilingual models whose usage differs for inference.
## XLM
XLM has ten different checkpoints, only one of which is monolingual. The nine remaining model checkpoints can be split into two categories: the checkpoints that use language embeddings and those that don't.
### XLM with language embeddings
The following XLM models use language embeddings to specify the language used at inference:
- `FacebookAI/xlm-mlm-ende-1024` (Masked language modeling, English-German)
- `FacebookAI/xlm-mlm-enfr-1024` (Masked language modeling, English-French)
- `FacebookAI/xlm-mlm-enro-1024` (Masked language modeling, English-Romanian)
- `FacebookAI/xlm-mlm-xnli15-1024` (Masked language modeling, XNLI languages)
- `FacebookAI/xlm-mlm-tlm-xnli15-1024` (Masked language modeling + translation, XNLI languages)
- `FacebookAI/xlm-clm-enfr-1024` (Causal language modeling, English-French)
- `FacebookAI/xlm-clm-ende-1024` (Causal language modeling, English-German)
Language embeddings are represented as a tensor of the same shape as the `input_ids` passed to the model. The values in these tensors depend on the language used and are identified by the tokenizer's `lang2id` and `id2lang` attributes.
In this example, load the `FacebookAI/xlm-clm-enfr-1024` checkpoint (Causal language modeling, English-French):
```py
>>> import torch
>>> from transformers import XLMTokenizer, XLMWithLMHeadModel
>>> tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
>>> model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-clm-enfr-1024")
```
The `lang2id` attribute of the tokenizer displays this model's languages and their ids:
```py
>>> print(tokenizer.lang2id)
{'en': 0, 'fr': 1}
```
Next, create an example input:
```py
>>> input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")]) # batch size of 1
```
Set the language id as `"en"` and use it to define the language embedding. The language embedding is a tensor filled with `0` since that is the language id for English. This tensor should be the same size as `input_ids`.
```py
>>> language_id = tokenizer.lang2id["en"] # 0
>>> langs = torch.tensor([language_id] * input_ids.shape[1]) # torch.tensor([0, 0, 0, ..., 0])
>>> # We reshape it to be of size (batch_size, sequence_length)
>>> langs = langs.view(1, -1) # is now of shape [1, sequence_length] (we have a batch size of 1)
```
Now you can pass the `input_ids` and language embedding to the model:
```py
>>> outputs = model(input_ids, langs=langs)
```
The [run_generation.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation/run_generation.py) script can generate text with language embeddings using the `xlm-clm` checkpoints.
### XLM without language embeddings
The following XLM models do not require language embeddings during inference:
- `FacebookAI/xlm-mlm-17-1280` (Masked language modeling, 17 languages)
- `FacebookAI/xlm-mlm-100-1280` (Masked language modeling, 100 languages)
These models are used for generic sentence representations, unlike the previous XLM checkpoints.
## BERT
The following BERT models can be used for multilingual tasks:
- `google-bert/bert-base-multilingual-uncased` (Masked language modeling + Next sentence prediction, 102 languages)
- `google-bert/bert-base-multilingual-cased` (Masked language modeling + Next sentence prediction, 104 languages)
These models do not require language embeddings during inference. They should identify the language from the
context and infer accordingly.
## XLM-RoBERTa
The following XLM-RoBERTa models can be used for multilingual tasks:
- `FacebookAI/xlm-roberta-base` (Masked language modeling, 100 languages)
- `FacebookAI/xlm-roberta-large` (Masked language modeling, 100 languages)
XLM-RoBERTa was trained on 2.5TB of newly created and cleaned CommonCrawl data in 100 languages. It provides strong gains over previously released multilingual models like mBERT or XLM on downstream tasks like classification, sequence labeling, and question answering.
## M2M100
The following M2M100 models can be used for multilingual translation:
- `facebook/m2m100_418M` (Translation)
- `facebook/m2m100_1.2B` (Translation)
In this example, load the `facebook/m2m100_418M` checkpoint to translate from Chinese to English. You can set the source language in the tokenizer:
```py
>>> from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> chinese_text = "äžèŠææå·«åž«çäºå, å çºä»åæ¯åŸ®åŠç, åŸå¿«å°±æçŒæ."
>>> tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="zh")
>>> model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
```
Tokenize the text:
```py
>>> encoded_zh = tokenizer(chinese_text, return_tensors="pt")
```
M2M100 forces the target language id as the first generated token to translate to the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English:
```py
>>> generated_tokens = model.generate(**encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en"))
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
'Do not interfere with the matters of the witches, because they are delicate and will soon be angry.'
```
## MBart
The following MBart models can be used for multilingual translation:
- `facebook/mbart-large-50-one-to-many-mmt` (One-to-many multilingual machine translation, 50 languages)
- `facebook/mbart-large-50-many-to-many-mmt` (Many-to-many multilingual machine translation, 50 languages)
- `facebook/mbart-large-50-many-to-one-mmt` (Many-to-one multilingual machine translation, 50 languages)
- `facebook/mbart-large-50` (Multilingual translation, 50 languages)
- `facebook/mbart-large-cc25`
In this example, load the `facebook/mbart-large-50-many-to-many-mmt` checkpoint to translate Finnish to English. You can set the source language in the tokenizer:
```py
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
>>> en_text = "Do not meddle in the affairs of wizards, for they are subtle and quick to anger."
>>> fi_text = "ÃlÀ sekaannu velhojen asioihin, sillÀ ne ovat hienovaraisia ja nopeasti vihaisia."
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-many-to-many-mmt", src_lang="fi_FI")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
```
Tokenize the text:
```py
>>> encoded_en = tokenizer(en_text, return_tensors="pt")
```
MBart forces the target language id as the first generated token to translate to the target language. Set the `forced_bos_token_id` to `en` in the `generate` method to translate to English:
```py
>>> generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
>>> tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
"Don't interfere with the wizard's affairs, because they are subtle, will soon get angry."
```
If you are using the `facebook/mbart-large-50-many-to-one-mmt` checkpoint, you don't need to force the target language id as the first generated token otherwise the usage is the same.
| transformers/docs/source/en/multilingual.md/0 | {
"file_path": "transformers/docs/source/en/multilingual.md",
"repo_id": "transformers",
"token_count": 2588
} | 269 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Philosophy
ð€ Transformers is an opinionated library built for:
- machine learning researchers and educators seeking to use, study or extend large-scale Transformers models.
- hands-on practitioners who want to fine-tune those models or serve them in production, or both.
- engineers who just want to download a pretrained model and use it to solve a given machine learning task.
The library was designed with two strong goals in mind:
1. Be as easy and fast to use as possible:
- We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions,
just three standard classes required to use each model: [configuration](main_classes/configuration),
[models](main_classes/model), and a preprocessing class ([tokenizer](main_classes/tokenizer) for NLP, [image processor](main_classes/image_processor) for vision, [feature extractor](main_classes/feature_extractor) for audio, and [processor](main_classes/processors) for multimodal inputs).
- All of these classes can be initialized in a simple and unified way from pretrained instances by using a common
`from_pretrained()` method which downloads (if needed), caches and
loads the related class instance and associated data (configurations' hyperparameters, tokenizers' vocabulary,
and models' weights) from a pretrained checkpoint provided on [Hugging Face Hub](https://huggingface.co/models) or your own saved checkpoint.
- On top of those three base classes, the library provides two APIs: [`pipeline`] for quickly
using a model for inference on a given task and [`Trainer`] to quickly train or fine-tune a PyTorch model (all TensorFlow models are compatible with `Keras.fit`).
- As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to
extend or build upon the library, just use regular Python, PyTorch, TensorFlow, Keras modules and inherit from the base
classes of the library to reuse functionalities like model loading and saving. If you'd like to learn more about our coding philosophy for models, check out our [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) blog post.
2. Provide state-of-the-art models with performances as close as possible to the original models:
- We provide at least one example for each architecture which reproduces a result provided by the official authors
of said architecture.
- The code is usually as close to the original code base as possible which means some PyTorch code may be not as
*pytorchic* as it could be as a result of being converted TensorFlow code and vice versa.
A few other goals:
- Expose the models' internals as consistently as possible:
- We give access, using a single API, to the full hidden-states and attention weights.
- The preprocessing classes and base model APIs are standardized to easily switch between models.
- Incorporate a subjective selection of promising tools for fine-tuning and investigating these models:
- A simple and consistent way to add new tokens to the vocabulary and embeddings for fine-tuning.
- Simple ways to mask and prune Transformer heads.
- Easily switch between PyTorch, TensorFlow 2.0 and Flax, allowing training with one framework and inference with another.
## Main concepts
The library is built around three types of classes for each model:
- **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)) that work with the pretrained weights provided in the library.
- **Configuration classes** store the hyperparameters required to build a model (such as the number of layers and hidden size). You don't always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model).
- **Preprocessing classes** convert the raw data into a format accepted by the model. A [tokenizer](main_classes/tokenizer) stores the vocabulary for each model and provide methods for encoding and decoding strings in a list of token embedding indices to be fed to a model. [Image processors](main_classes/image_processor) preprocess vision inputs, [feature extractors](main_classes/feature_extractor) preprocess audio inputs, and a [processor](main_classes/processors) handles multimodal inputs.
All these classes can be instantiated from pretrained instances, saved locally, and shared on the Hub with three methods:
- `from_pretrained()` lets you instantiate a model, configuration, and preprocessing class from a pretrained version either
provided by the library itself (the supported models can be found on the [Model Hub](https://huggingface.co/models)) or
stored locally (or on a server) by the user.
- `save_pretrained()` lets you save a model, configuration, and preprocessing class locally so that it can be reloaded using
`from_pretrained()`.
- `push_to_hub()` lets you share a model, configuration, and a preprocessing class to the Hub, so it is easily accessible to everyone.
| transformers/docs/source/en/philosophy.md/0 | {
"file_path": "transformers/docs/source/en/philosophy.md",
"repo_id": "transformers",
"token_count": 1518
} | 270 |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# TorchAO
[TorchAO](https://github.com/pytorch/ao) is an architecture optimization library for PyTorch, it provides high performance dtypes, optimization techniques and kernels for inference and training, featuring composability with native PyTorch features like `torch.compile`, FSDP etc.. Some benchmark numbers can be found [here](https://github.com/pytorch/ao/tree/main?tab=readme-ov-file#without-intrusive-code-changes)
Before you begin, make sure the following libraries are installed with their latest version:
```bash
pip install --upgrade torch torchao
```
```py
from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer
model_name = "meta-llama/Meta-Llama-3-8B"
# We support int4_weight_only, int8_weight_only and int8_dynamic_activation_int8_weight
# More examples and documentations for arguments can be found in https://github.com/pytorch/ao/tree/main/torchao/quantization#other-available-quantization-techniques
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", quantization_config=quantization_config)
tokenizer = AutoTokenizer.from_pretrained(model_name)
input_text = "What are we having for dinner?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
# compile the quantizd model to get speedup
import torchao
torchao.quantization.utils.recommended_inductor_config_setter()
quantized_model = torch.compile(quantized_model, mode="max-autotune")
output = quantized_model.generate(**input_ids, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```
torchao quantization is implemented with tensor subclasses, currently it does not work with huggingface serialization, both the safetensor option and [non-safetensor option](https://github.com/huggingface/transformers/issues/32364), we'll update here with instructions when it's working.
| transformers/docs/source/en/quantization/torchao.md/0 | {
"file_path": "transformers/docs/source/en/quantization/torchao.md",
"repo_id": "transformers",
"token_count": 759
} | 271 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Causal language modeling
[[open-in-colab]]
There are two types of language modeling, causal and masked. This guide illustrates causal language modeling.
Causal language models are frequently used for text generation. You can use these models for creative applications like
choosing your own text adventure or an intelligent coding assistant like Copilot or CodeParrot.
<Youtube id="Vpjb1lu0MDk"/>
Causal language modeling predicts the next token in a sequence of tokens, and the model can only attend to tokens on
the left. This means the model cannot see future tokens. GPT-2 is an example of a causal language model.
This guide will show you how to:
1. Finetune [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) on the [r/askscience](https://www.reddit.com/r/askscience/) subset of the [ELI5](https://huggingface.co/datasets/eli5) dataset.
2. Use your finetuned model for inference.
<Tip>
To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/text-generation)
</Tip>
Before you begin, make sure you have all the necessary libraries installed:
```bash
pip install transformers datasets evaluate
```
We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Load ELI5 dataset
Start by loading the first 5000 examples from the [ELI5-Category](https://huggingface.co/datasets/eli5_category) dataset with the ð€ Datasets library. This'll give you a chance to experiment and make sure everything works before spending more time training on the full dataset.
```py
>>> from datasets import load_dataset
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
```
Split the dataset's `train` split into a train and test set with the [`~datasets.Dataset.train_test_split`] method:
```py
>>> eli5 = eli5.train_test_split(test_size=0.2)
```
Then take a look at an example:
```py
>>> eli5["train"][0]
{'q_id': '7h191n',
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
'selftext': '',
'category': 'Economics',
'subreddit': 'explainlikeimfive',
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
'score': [21, 19, 5, 3],
'text_urls': [[],
[],
[],
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
'title_urls': ['url'],
'selftext_urls': ['url']}
```
While this may look like a lot, you're only really interested in the `text` field. What's cool about language modeling
tasks is you don't need labels (also known as an unsupervised task) because the next word *is* the label.
## Preprocess
<Youtube id="ma1TrR7gE7I"/>
The next step is to load a DistilGPT2 tokenizer to process the `text` subfield:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
```
You'll notice from the example above, the `text` field is actually nested inside `answers`. This means you'll need to
extract the `text` subfield from its nested structure with the [`flatten`](https://huggingface.co/docs/datasets/process#flatten) method:
```py
>>> eli5 = eli5.flatten()
>>> eli5["train"][0]
{'q_id': '7h191n',
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
'selftext': '',
'category': 'Economics',
'subreddit': 'explainlikeimfive',
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
'answers.score': [21, 19, 5, 3],
'answers.text_urls': [[],
[],
[],
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
'title_urls': ['url'],
'selftext_urls': ['url']}
```
Each subfield is now a separate column as indicated by the `answers` prefix, and the `text` field is a list now. Instead
of tokenizing each sentence separately, convert the list to a string so you can jointly tokenize them.
Here is a first preprocessing function to join the list of strings for each example and tokenize the result:
```py
>>> def preprocess_function(examples):
... return tokenizer([" ".join(x) for x in examples["answers.text"]])
```
To apply this preprocessing function over the entire dataset, use the ð€ Datasets [`~datasets.Dataset.map`] method. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once, and increasing the number of processes with `num_proc`. Remove any columns you don't need:
```py
>>> tokenized_eli5 = eli5.map(
... preprocess_function,
... batched=True,
... num_proc=4,
... remove_columns=eli5["train"].column_names,
... )
```
This dataset contains the token sequences, but some of these are longer than the maximum input length for the model.
You can now use a second preprocessing function to
- concatenate all the sequences
- split the concatenated sequences into shorter chunks defined by `block_size`, which should be both shorter than the maximum input length and short enough for your GPU RAM.
```py
>>> block_size = 128
>>> def group_texts(examples):
... # Concatenate all texts.
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
... total_length = len(concatenated_examples[list(examples.keys())[0]])
... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
... # customize this part to your needs.
... if total_length >= block_size:
... total_length = (total_length // block_size) * block_size
... # Split by chunks of block_size.
... result = {
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
... for k, t in concatenated_examples.items()
... }
... result["labels"] = result["input_ids"].copy()
... return result
```
Apply the `group_texts` function over the entire dataset:
```py
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
```
Now create a batch of examples using [`DataCollatorForLanguageModeling`]. It's more efficient to *dynamically pad* the
sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.
<frameworkcontent>
<pt>
Use the end-of-sequence token as the padding token and set `mlm=False`. This will use the inputs as labels shifted to the right by one element:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
```
</pt>
<tf>
Use the end-of-sequence token as the padding token and set `mlm=False`. This will use the inputs as labels shifted to the right by one element:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")
```
</tf>
</frameworkcontent>
## Train
<frameworkcontent>
<pt>
<Tip>
If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the [basic tutorial](../training#train-with-pytorch-trainer)!
</Tip>
You're ready to start training your model now! Load DistilGPT2 with [`AutoModelForCausalLM`]:
```py
>>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir` which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model).
2. Pass the training arguments to [`Trainer`] along with the model, datasets, and data collator.
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> training_args = TrainingArguments(
... output_dir="my_awesome_eli5_clm-model",
... eval_strategy="epoch",
... learning_rate=2e-5,
... weight_decay=0.01,
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=lm_dataset["train"],
... eval_dataset=lm_dataset["test"],
... data_collator=data_collator,
... )
>>> trainer.train()
```
Once training is completed, use the [`~transformers.Trainer.evaluate`] method to evaluate your model and get its perplexity:
```py
>>> import math
>>> eval_results = trainer.evaluate()
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
Perplexity: 49.61
```
Then share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:
```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
<Tip>
If you aren't familiar with finetuning a model with Keras, take a look at the [basic tutorial](../training#train-a-tensorflow-model-with-keras)!
</Tip>
To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:
```py
>>> from transformers import create_optimizer, AdamWeightDecay
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```
Then you can load DistilGPT2 with [`TFAutoModelForCausalLM`]:
```py
>>> from transformers import TFAutoModelForCausalLM
>>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
```
Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
```py
>>> tf_train_set = model.prepare_tf_dataset(
... lm_dataset["train"],
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
>>> tf_test_set = model.prepare_tf_dataset(
... lm_dataset["test"],
... shuffle=False,
... batch_size=16,
... collate_fn=data_collator,
... )
```
Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
```py
>>> import tensorflow as tf
>>> model.compile(optimizer=optimizer) # No loss argument!
```
This can be done by specifying where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]:
```py
>>> from transformers.keras_callbacks import PushToHubCallback
>>> callback = PushToHubCallback(
... output_dir="my_awesome_eli5_clm-model",
... tokenizer=tokenizer,
... )
```
Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callback to finetune the model:
```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
```
Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!
</tf>
</frameworkcontent>
<Tip>
For a more in-depth example of how to finetune a model for causal language modeling, take a look at the corresponding
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
</Tip>
## Inference
Great, now that you've finetuned a model, you can use it for inference!
Come up with a prompt you'd like to generate text from:
```py
>>> prompt = "Somatic hypermutation allows the immune system to"
```
The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for text generation with your model, and pass your text to it:
```py
>>> from transformers import pipeline
>>> generator = pipeline("text-generation", model="username/my_awesome_eli5_clm-model")
>>> generator(prompt)
[{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}]
```
<frameworkcontent>
<pt>
Tokenize the text and return the `input_ids` as PyTorch tensors:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
>>> inputs = tokenizer(prompt, return_tensors="pt").input_ids
```
Use the [`~generation.GenerationMixin.generate`] method to generate text.
For more details about the different text generation strategies and parameters for controlling generation, check out the [Text generation strategies](../generation_strategies) page.
```py
>>> from transformers import AutoModelForCausalLM
>>> model = AutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
>>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
```
Decode the generated token ids back into text:
```py
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"]
```
</pt>
<tf>
Tokenize the text and return the `input_ids` as TensorFlow tensors:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
>>> inputs = tokenizer(prompt, return_tensors="tf").input_ids
```
Use the [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] method to create the summarization. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text generation strategies](../generation_strategies) page.
```py
>>> from transformers import TFAutoModelForCausalLM
>>> model = TFAutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
>>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
```
Decode the generated token ids back into text:
```py
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for']
```
</tf>
</frameworkcontent>
| transformers/docs/source/en/tasks/language_modeling.md/0 | {
"file_path": "transformers/docs/source/en/tasks/language_modeling.md",
"repo_id": "transformers",
"token_count": 5525
} | 272 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Visual Question Answering
[[open-in-colab]]
Visual Question Answering (VQA) is the task of answering open-ended questions based on an image.
The input to models supporting this task is typically a combination of an image and a question, and the output is an
answer expressed in natural language.
Some noteworthy use case examples for VQA include:
* Accessibility applications for visually impaired individuals.
* Education: posing questions about visual materials presented in lectures or textbooks. VQA can also be utilized in interactive museum exhibits or historical sites.
* Customer service and e-commerce: VQA can enhance user experience by letting users ask questions about products.
* Image retrieval: VQA models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images.
In this guide you'll learn how to:
- Fine-tune a classification VQA model, specifically [ViLT](../model_doc/vilt), on the [`Graphcore/vqa` dataset](https://huggingface.co/datasets/Graphcore/vqa).
- Use your fine-tuned ViLT for inference.
- Run zero-shot VQA inference with a generative model, like BLIP-2.
## Fine-tuning ViLT
ViLT model incorporates text embeddings into a Vision Transformer (ViT), allowing it to have a minimal design for
Vision-and-Language Pre-training (VLP). This model can be used for several downstream tasks. For the VQA task, a classifier
head is placed on top (a linear layer on top of the final hidden state of the `[CLS]` token) and randomly initialized.
Visual Question Answering is thus treated as a **classification problem**.
More recent models, such as BLIP, BLIP-2, and InstructBLIP, treat VQA as a generative task. Later in this guide we
illustrate how to use them for zero-shot VQA inference.
Before you begin, make sure you have all the necessary libraries installed.
```bash
pip install -q transformers datasets
```
We encourage you to share your model with the community. Log in to your Hugging Face account to upload it to the ð€ Hub.
When prompted, enter your token to log in:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
Let's define the model checkpoint as a global variable.
```py
>>> model_checkpoint = "dandelin/vilt-b32-mlm"
```
## Load the data
For illustration purposes, in this guide we use a very small sample of the annotated visual question answering `Graphcore/vqa` dataset.
You can find the full dataset on [ð€ Hub](https://huggingface.co/datasets/Graphcore/vqa).
As an alternative to the [`Graphcore/vqa` dataset](https://huggingface.co/datasets/Graphcore/vqa), you can download the
same data manually from the official [VQA dataset page](https://visualqa.org/download.html). If you prefer to follow the
tutorial with your custom data, check out how to [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset#loading-script)
guide in the ð€ Datasets documentation.
Let's load the first 200 examples from the validation split and explore the dataset's features:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("Graphcore/vqa", split="validation[:200]")
>>> dataset
Dataset({
features: ['question', 'question_type', 'question_id', 'image_id', 'answer_type', 'label'],
num_rows: 200
})
```
Let's take a look at an example to understand the dataset's features:
```py
>>> dataset[0]
{'question': 'Where is he looking?',
'question_type': 'none of the above',
'question_id': 262148000,
'image_id': '/root/.cache/huggingface/datasets/downloads/extracted/ca733e0e000fb2d7a09fbcc94dbfe7b5a30750681d0e965f8e0a23b1c2f98c75/val2014/COCO_val2014_000000262148.jpg',
'answer_type': 'other',
'label': {'ids': ['at table', 'down', 'skateboard', 'table'],
'weights': [0.30000001192092896,
1.0,
0.30000001192092896,
0.30000001192092896]}}
```
The features relevant to the task include:
* `question`: the question to be answered from the image
* `image_id`: the path to the image the question refers to
* `label`: the annotations
We can remove the rest of the features as they won't be necessary:
```py
>>> dataset = dataset.remove_columns(['question_type', 'question_id', 'answer_type'])
```
As you can see, the `label` feature contains several answers to the same question (called `ids` here) collected by different human annotators.
This is because the answer to a question can be subjective. In this case, the question is "where is he looking?". Some people
annotated this with "down", others with "at table", another one with "skateboard", etc.
Take a look at the image and consider which answer would you give:
```python
>>> from PIL import Image
>>> image = Image.open(dataset[0]['image_id'])
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/vqa-example.png" alt="VQA Image Example"/>
</div>
Due to the questions' and answers' ambiguity, datasets like this are treated as a multi-label classification problem (as
multiple answers are possibly valid). Moreover, rather than just creating a one-hot encoded vector, one creates a
soft encoding, based on the number of times a certain answer appeared in the annotations.
For instance, in the example above, because the answer "down" is selected way more often than other answers, it has a
score (called `weight` in the dataset) of 1.0, and the rest of the answers have scores < 1.0.
To later instantiate the model with an appropriate classification head, let's create two dictionaries: one that maps
the label name to an integer and vice versa:
```py
>>> import itertools
>>> labels = [item['ids'] for item in dataset['label']]
>>> flattened_labels = list(itertools.chain(*labels))
>>> unique_labels = list(set(flattened_labels))
>>> label2id = {label: idx for idx, label in enumerate(unique_labels)}
>>> id2label = {idx: label for label, idx in label2id.items()}
```
Now that we have the mappings, we can replace the string answers with their ids, and flatten the dataset for a more convenient further preprocessing.
```python
>>> def replace_ids(inputs):
... inputs["label"]["ids"] = [label2id[x] for x in inputs["label"]["ids"]]
... return inputs
>>> dataset = dataset.map(replace_ids)
>>> flat_dataset = dataset.flatten()
>>> flat_dataset.features
{'question': Value(dtype='string', id=None),
'image_id': Value(dtype='string', id=None),
'label.ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None),
'label.weights': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}
```
## Preprocessing data
The next step is to load a ViLT processor to prepare the image and text data for the model.
[`ViltProcessor`] wraps a BERT tokenizer and ViLT image processor into a convenient single processor:
```py
>>> from transformers import ViltProcessor
>>> processor = ViltProcessor.from_pretrained(model_checkpoint)
```
To preprocess the data we need to encode the images and questions using the [`ViltProcessor`]. The processor will use
the [`BertTokenizerFast`] to tokenize the text and create `input_ids`, `attention_mask` and `token_type_ids` for the text data.
As for images, the processor will leverage [`ViltImageProcessor`] to resize and normalize the image, and create `pixel_values` and `pixel_mask`.
All these preprocessing steps are done under the hood, we only need to call the `processor`. However, we still need to
prepare the target labels. In this representation, each element corresponds to a possible answer (label). For correct answers, the element holds
their respective score (weight), while the remaining elements are set to zero.
The following function applies the `processor` to the images and questions and formats the labels as described above:
```py
>>> import torch
>>> def preprocess_data(examples):
... image_paths = examples['image_id']
... images = [Image.open(image_path) for image_path in image_paths]
... texts = examples['question']
... encoding = processor(images, texts, padding="max_length", truncation=True, return_tensors="pt")
... for k, v in encoding.items():
... encoding[k] = v.squeeze()
... targets = []
... for labels, scores in zip(examples['label.ids'], examples['label.weights']):
... target = torch.zeros(len(id2label))
... for label, score in zip(labels, scores):
... target[label] = score
... targets.append(target)
... encoding["labels"] = targets
... return encoding
```
To apply the preprocessing function over the entire dataset, use ð€ Datasets [`~datasets.map`] function. You can speed up `map` by
setting `batched=True` to process multiple elements of the dataset at once. At this point, feel free to remove the columns you don't need.
```py
>>> processed_dataset = flat_dataset.map(preprocess_data, batched=True, remove_columns=['question','question_type', 'question_id', 'image_id', 'answer_type', 'label.ids', 'label.weights'])
>>> processed_dataset
Dataset({
features: ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values', 'pixel_mask', 'labels'],
num_rows: 200
})
```
As a final step, create a batch of examples using [`DefaultDataCollator`]:
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator()
```
## Train the model
Youâre ready to start training your model now! Load ViLT with [`ViltForQuestionAnswering`]. Specify the number of labels
along with the label mappings:
```py
>>> from transformers import ViltForQuestionAnswering
>>> model = ViltForQuestionAnswering.from_pretrained(model_checkpoint, num_labels=len(id2label), id2label=id2label, label2id=label2id)
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`TrainingArguments`]:
```py
>>> from transformers import TrainingArguments
>>> repo_id = "MariaK/vilt_finetuned_200"
>>> training_args = TrainingArguments(
... output_dir=repo_id,
... per_device_train_batch_size=4,
... num_train_epochs=20,
... save_steps=200,
... logging_steps=50,
... learning_rate=5e-5,
... save_total_limit=2,
... remove_unused_columns=False,
... push_to_hub=True,
... )
```
2. Pass the training arguments to [`Trainer`] along with the model, dataset, processor, and data collator.
```py
>>> from transformers import Trainer
>>> trainer = Trainer(
... model=model,
... args=training_args,
... data_collator=data_collator,
... train_dataset=processed_dataset,
... tokenizer=processor,
... )
```
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> trainer.train()
```
Once training is completed, share your model to the Hub with the [`~Trainer.push_to_hub`] method to share your final model on the ð€ Hub:
```py
>>> trainer.push_to_hub()
```
## Inference
Now that you have fine-tuned a ViLT model, and uploaded it to the ð€ Hub, you can use it for inference. The simplest
way to try out your fine-tuned model for inference is to use it in a [`Pipeline`].
```py
>>> from transformers import pipeline
>>> pipe = pipeline("visual-question-answering", model="MariaK/vilt_finetuned_200")
```
The model in this guide has only been trained on 200 examples, so don't expect a lot from it. Let's see if it at least
learned something from the data and take the first example from the dataset to illustrate inference:
```py
>>> example = dataset[0]
>>> image = Image.open(example['image_id'])
>>> question = example['question']
>>> print(question)
>>> pipe(image, question, top_k=1)
"Where is he looking?"
[{'score': 0.5498199462890625, 'answer': 'down'}]
```
Even though not very confident, the model indeed has learned something. With more examples and longer training, you'll get far better results!
You can also manually replicate the results of the pipeline if you'd like:
1. Take an image and a question, prepare them for the model using the processor from your model.
2. Forward the result or preprocessing through the model.
3. From the logits, get the most likely answer's id, and find the actual answer in the `id2label`.
```py
>>> processor = ViltProcessor.from_pretrained("MariaK/vilt_finetuned_200")
>>> image = Image.open(example['image_id'])
>>> question = example['question']
>>> # prepare inputs
>>> inputs = processor(image, question, return_tensors="pt")
>>> model = ViltForQuestionAnswering.from_pretrained("MariaK/vilt_finetuned_200")
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> logits = outputs.logits
>>> idx = logits.argmax(-1).item()
>>> print("Predicted answer:", model.config.id2label[idx])
Predicted answer: down
```
## Zero-shot VQA
The previous model treated VQA as a classification task. Some recent models, such as BLIP, BLIP-2, and InstructBLIP approach
VQA as a generative task. Let's take [BLIP-2](../model_doc/blip-2) as an example. It introduced a new visual-language pre-training
paradigm in which any combination of pre-trained vision encoder and LLM can be used (learn more in the [BLIP-2 blog post](https://huggingface.co/blog/blip-2)).
This enables achieving state-of-the-art results on multiple visual-language tasks including visual question answering.
Let's illustrate how you can use this model for VQA. First, let's load the model. Here we'll explicitly send the model to a
GPU, if available, which we didn't need to do earlier when training, as [`Trainer`] handles this automatically:
```py
>>> from transformers import AutoProcessor, Blip2ForConditionalGeneration
>>> import torch
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model.to(device)
```
The model takes image and text as input, so let's use the exact same image/question pair from the first example in the VQA dataset:
```py
>>> example = dataset[0]
>>> image = Image.open(example['image_id'])
>>> question = example['question']
```
To use BLIP-2 for visual question answering task, the textual prompt has to follow a specific format: `Question: {} Answer:`.
```py
>>> prompt = f"Question: {question} Answer:"
```
Now we need to preprocess the image/prompt with the model's processor, pass the processed input through the model, and decode the output:
```py
>>> inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=10)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"He is looking at the crowd"
```
As you can see, the model recognized the crowd, and the direction of the face (looking down), however, it seems to miss
the fact the crowd is behind the skater. Still, in cases where acquiring human-annotated datasets is not feasible, this
approach can quickly produce useful results.
| transformers/docs/source/en/tasks/visual_question_answering.md/0 | {
"file_path": "transformers/docs/source/en/tasks/visual_question_answering.md",
"repo_id": "transformers",
"token_count": 4862
} | 273 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Mecanismos de atención
La mayorÃa de los modelos transformers utilizan atención completa, en el sentido de que la matriz de atención es cuadrada. Esto puede ser un gran cuello de botella computacional cuando tienes textos largos. `Longformer` y `reformer` son modelos que intentan ser más eficientes y utilizan una versión dispersa de la matriz de atención para acelerar el entrenamiento.
## Atención LSH
[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer) utiliza atención LSH. En el softmax(QK^t), solo los elementos más grandes (en la dimensión softmax) de la matriz QK^t van a dar contribuciones útiles. Entonces, para cada consulta q en Q, podemos considerar solo las claves k en K que estén cerca de q. Se utiliza una función hash para determinar si q y k están cerca. La máscara de atención se modifica para enmascarar el token actual (excepto en la primera posición), porque dará una consulta y una clave iguales (entonces muy similares entre sÃ). Dado que el hash puede ser un poco aleatorio, en la práctica se utilizan varias funciones hash (determinadas por un parámetro n_rounds) y luego se promedian juntas.
## Atención local
[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer) utiliza atención local: a menudo, el contexto local (por ejemplo, ¿cuáles son los dos tokens a la izquierda y a la derecha?) es suficiente para tomar acción para un token dado. Además, apilando capas de atención que tienen una ventana pequeña, la última capa tendrá un campo receptivo mayor que solamente los tokens en la ventana, lo que les permite construir una representación de toda la oración.
Algunos tokens de entrada preseleccionados también reciben atención global: para esos pocos tokens, la matriz de atención puede acceder a todos los tokens y este proceso es simétrico: todos los demás tokens tienen acceso a esos tokens especÃficos (además de los que están en su ventana local). Esto se muestra en la Figura 2d del artÃculo, el cual se puede apreciar un ejemplo de una máscara de atención:
<div class="flex justify-center">
<img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/>
</div>
El uso de dichas matrices de atención con menos parámetros permite que el modelo tenga entradas con una longitud de secuencia mayor.
## Otros trucos
### Codificación posicional axial
[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer) utiliza codificación posicional axial: en los modelos transformers tradicionales, la codificación posicional E es una matriz de tamaño \\(l\\) por \\(d\\), donde \\(l\\) es la longitud de la secuencia y \\(d\\) es la dimensión del estado oculto. Si tienes textos muy extensos, esta matriz puede ser enorme y ocupar demasiado espacio en la GPU. Para aliviar eso, las codificaciones posicionales axiales consisten en factorizar esa gran matriz E en dos matrices más pequeñas E1 y E2, con dimensiones \\(l_{1} \times d_{1}\\) y \\(l_{2} \times d_{2}\\), tal que \\(l_{1} \times l_{2} = l\\) y \\(d_{1} + d_{2} = d\\) (con el producto de las longitudes, esto termina siendo mucho más pequeño). La incrustación (embedding) para el paso de tiempo \\(j\\) en E se obtiene concatenando las incrustaciones para el paso de tiempo \\(j \% l1\\) en E1 y \\(j // l1\\) en E2.
| transformers/docs/source/es/attention.md/0 | {
"file_path": "transformers/docs/source/es/attention.md",
"repo_id": "transformers",
"token_count": 1396
} | 274 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Modelado de lenguaje
El modelado de lenguaje predice palabras en un enunciado. Hay dos formas de modelado de lenguaje.
<Youtube id="Vpjb1lu0MDk"/>
El modelado de lenguaje causal predice el siguiente token en una secuencia de tokens, y el modelo solo puede considerar los tokens a la izquierda.
<Youtube id="mqElG5QJWUg"/>
El modelado de lenguaje por enmascaramiento predice un token enmascarado en una secuencia, y el modelo puede considerar los tokens bidireccionalmente.
Esta guÃa te mostrará cómo realizar fine-tuning [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) para modelos de lenguaje causales y [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) para modelos de lenguaje por enmascaramiento en el [r/askscience](https://www.reddit.com/r/askscience/) subdataset [ELI5](https://huggingface.co/datasets/eli5).
<Tip>
Mira la [página de tarea](https://huggingface.co/tasks/text-generation) para generación de texto y la [página de tarea](https://huggingface.co/tasks/fill-mask) para modelos de lenguajes por enmascaramiento para obtener más información sobre los modelos, datasets, y métricas asociadas.
</Tip>
## Carga el dataset ELI5
Carga solo los primeros 5000 registros desde la biblioteca ð€ Datasets, dado que es bastante grande:
```py
>>> from datasets import load_dataset
>>> eli5 = load_dataset("eli5", split="train_asks[:5000]")
```
Divide este dataset en subdatasets para el entrenamiento y el test:
```py
eli5 = eli5.train_test_split(test_size=0.2)
```
Luego observa un ejemplo:
```py
>>> eli5["train"][0]
{'answers': {'a_id': ['c3d1aib', 'c3d4lya'],
'score': [6, 3],
'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.",
"Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]},
'answers_urls': {'url': []},
'document': '',
'q_id': 'nyxfp',
'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?',
'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']},
'subreddit': 'askscience',
'title': 'Few questions about this space walk photograph.',
'title_urls': {'url': []}}
```
Observa que `text` es un subcampo anidado dentro del diccionario `answers`. Cuando preproceses el dataset, deberás extraer el subcampo `text` en una columna aparte.
## Preprocesamiento
<Youtube id="ma1TrR7gE7I"/>
Para modelados de lenguaje causales carga el tokenizador DistilGPT2 para procesar el subcampo `text`:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
```
<Youtube id="8PmhEIXhBvI"/>
Para modelados de lenguaje por enmascaramiento carga el tokenizador DistilRoBERTa, en lugar de DistilGPT2:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base")
```
Extrae el subcampo `text` desde su estructura anidado con el método [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
```py
>>> eli5 = eli5.flatten()
>>> eli5["train"][0]
{'answers.a_id': ['c3d1aib', 'c3d4lya'],
'answers.score': [6, 3],
'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.",
"Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"],
'answers_urls.url': [],
'document': '',
'q_id': 'nyxfp',
'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?',
'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'],
'subreddit': 'askscience',
'title': 'Few questions about this space walk photograph.',
'title_urls.url': []}
```
Cada subcampo es ahora una columna separada, como lo indica el prefijo `answers`. Observa que `answers.text` es una lista. En lugar de tokenizar cada enunciado por separado, convierte la lista en un string para tokenizarlos conjuntamente.
Asà es como puedes crear una función de preprocesamiento para convertir la lista en una cadena y truncar las secuencias para que no superen la longitud máxima de input de DistilGPT2:
```py
>>> def preprocess_function(examples):
... return tokenizer([" ".join(x) for x in examples["answers.text"]], truncation=True)
```
Usa de ð€ Datasets la función [`map`](https://huggingface.co/docs/datasets/process#map) para aplicar la función de preprocesamiento sobre el dataset en su totalidad. Puedes acelerar la función `map` configurando el argumento `batched=True` para procesar múltiples elementos del dataset a la vez y aumentar la cantidad de procesos con `num_proc`. Elimina las columnas que no necesitas:
```py
>>> tokenized_eli5 = eli5.map(
... preprocess_function,
... batched=True,
... num_proc=4,
... remove_columns=eli5["train"].column_names,
... )
```
Ahora necesitas una segunda función de preprocesamiento para capturar el texto truncado de cualquier ejemplo demasiado largo para evitar cualquier pérdida de información. Esta función de preprocesamiento deberÃa:
- Concatenar todo el texto.
- Dividir el texto concatenado en trozos más pequeños definidos por un `block_size`.
```py
>>> block_size = 128
>>> def group_texts(examples):
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
... total_length = len(concatenated_examples[list(examples.keys())[0]])
... total_length = (total_length // block_size) * block_size
... result = {
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
... for k, t in concatenated_examples.items()
... }
... result["labels"] = result["input_ids"].copy()
... return result
```
Aplica la función `group_texts` sobre todo el dataset:
```py
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
```
Para modelados de lenguaje causales, usa [`DataCollatorForLanguageModeling`] para crear un lote de ejemplos. Esto también *rellenará dinámicamente* tu texto a la dimensión del elemento más largo del lote para que de esta manera tengan largo uniforme. Si bien es posible rellenar tu texto en la función `tokenizer` mediante el argumento `padding=True`, el rellenado dinámico es más eficiente.
<frameworkcontent>
<pt>
Puedes usar el token de final de secuencia como el token de relleno y asignar `mlm=False`. Esto usará los inputs como etiquetas movidas un elemento hacia la derecha:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
```
Para modelados de lenguaje por enmascaramiento usa el mismo [`DataCollatorForLanguageModeling`] excepto que deberás especificar `mlm_probability` para enmascarar tokens aleatoriamente cada vez que iteras sobre los datos.
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
```
</pt>
<tf>
Puedes usar el token de final de secuencia como el token de relleno y asignar `mlm=False`. Esto usará los inputs como etiquetas movidas un elemento hacia la derecha:
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")
```
Para modelados de lenguajes por enmascaramiento usa el mismo [`DataCollatorForLanguageModeling`] excepto que deberás especificar `mlm_probability` para enmascarar tokens aleatoriamente cada vez que iteras sobre los datos.
```py
>>> from transformers import DataCollatorForLanguageModeling
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")
```
</tf>
</frameworkcontent>
## Modelado de lenguaje causal
El modelado de lenguaje causal es frecuentemente utilizado para generación de texto. Esta sección te muestra cómo realizar fine-tuning a [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) para generar nuevo texto.
### Entrenamiento
<frameworkcontent>
<pt>
Carga DistilGPT2 con [`AutoModelForCausalLM`]:
```py
>>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
```
<Tip>
Si no estás familiarizado con el proceso de realizar fine-tuning sobre un modelo con [`Trainer`], considera el tutorial básico [aquÃ](../training#finetune-with-trainer)!
</Tip>
A este punto, solo faltan tres pasos:
1. Definir tus hiperparámetros de entrenamiento en [`TrainingArguments`].
2. Pasarle los argumentos de entrenamiento a [`Trainer`] junto con el modelo, dataset, y el data collator.
3. Realiza la llamada [`~Trainer.train`] para realizar el fine-tuning sobre tu modelo.
```py
>>> training_args = TrainingArguments(
... output_dir="./results",
... eval_strategy="epoch",
... learning_rate=2e-5,
... weight_decay=0.01,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=lm_dataset["train"],
... eval_dataset=lm_dataset["test"],
... data_collator=data_collator,
... )
>>> trainer.train()
```
</pt>
<tf>
Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator:
```py
>>> tf_train_set = lm_dataset["train"].to_tf_dataset(
... columns=["attention_mask", "input_ids", "labels"],
... dummy_labels=True,
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
>>> tf_test_set = lm_dataset["test"].to_tf_dataset(
... columns=["attention_mask", "input_ids", "labels"],
... dummy_labels=True,
... shuffle=False,
... batch_size=16,
... collate_fn=data_collator,
... )
```
<Tip>
Si no estás familiarizado con realizar fine-tuning de tus modelos con Keras, considera el tutorial básico [aquÃ](training#finetune-with-keras)!
</Tip>
Crea la función optimizadora, la tasa de aprendizaje, y algunos hiperparámetros de entrenamiento:
```py
>>> from transformers import create_optimizer, AdamWeightDecay
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```
Carga DistilGPT2 con [`TFAutoModelForCausalLM`]:
```py
>>> from transformers import TFAutoModelForCausalLM
>>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
```
Configura el modelo para entrenamiento con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):
```py
>>> import tensorflow as tf
>>> model.compile(optimizer=optimizer)
```
Llama a [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo:
```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)
```
</tf>
</frameworkcontent>
## Modelado de lenguaje por enmascaramiento
El modelado de lenguaje por enmascaramiento es también conocido como una tarea de rellenar la máscara, pues predice un token enmascarado dada una secuencia. Los modelos de lenguaje por enmascaramiento requieren una buena comprensión del contexto de una secuencia entera, en lugar de solo el contexto a la izquierda. Esta sección te enseña como realizar el fine-tuning de [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) para predecir una palabra enmascarada.
### Entrenamiento
<frameworkcontent>
<pt>
Carga DistilRoBERTa con [`AutoModelForMaskedlM`]:
```py
>>> from transformers import AutoModelForMaskedLM
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
```
<Tip>
Si no estás familiarizado con el proceso de realizar fine-tuning sobre un modelo con [`Trainer`], considera el tutorial básico [aquÃ](../training#finetune-with-trainer)!
</Tip>
A este punto, solo faltan tres pasos:
1. Definir tus hiperparámetros de entrenamiento en [`TrainingArguments`].
2. Pasarle los argumentos de entrenamiento a [`Trainer`] junto con el modelo, dataset, y el data collator.
3. Realiza la llamada [`~Trainer.train`] para realizar el fine-tuning de tu modelo.
```py
>>> training_args = TrainingArguments(
... output_dir="./results",
... eval_strategy="epoch",
... learning_rate=2e-5,
... num_train_epochs=3,
... weight_decay=0.01,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=lm_dataset["train"],
... eval_dataset=lm_dataset["test"],
... data_collator=data_collator,
... )
>>> trainer.train()
```
</pt>
<tf>
Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator:
```py
>>> tf_train_set = lm_dataset["train"].to_tf_dataset(
... columns=["attention_mask", "input_ids", "labels"],
... dummy_labels=True,
... shuffle=True,
... batch_size=16,
... collate_fn=data_collator,
... )
>>> tf_test_set = lm_dataset["test"].to_tf_dataset(
... columns=["attention_mask", "input_ids", "labels"],
... dummy_labels=True,
... shuffle=False,
... batch_size=16,
... collate_fn=data_collator,
... )
```
<Tip>
Si no estás familiarizado con realizar fine-tuning de tus modelos con Keras, considera el tutorial básico [aquÃ](training#finetune-with-keras)!
</Tip>
Crea la función optimizadora, la tasa de aprendizaje, y algunos hiperparámetros de entrenamiento:
```py
>>> from transformers import create_optimizer, AdamWeightDecay
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```
Carga DistilRoBERTa con [`TFAutoModelForMaskedLM`]:
```py
>>> from transformers import TFAutoModelForMaskedLM
>>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilroberta-base")
```
Configura el modelo para entrenamiento con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):
```py
>>> import tensorflow as tf
>>> model.compile(optimizer=optimizer)
```
Llama a [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo:
```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3)
```
</tf>
</frameworkcontent>
<Tip>
Para un ejemplo más profundo sobre cómo realizar el fine-tuning sobre un modelo de lenguaje causal, considera
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
o [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
</Tip> | transformers/docs/source/es/tasks/language_modeling.md/0 | {
"file_path": "transformers/docs/source/es/tasks/language_modeling.md",
"repo_id": "transformers",
"token_count": 6287
} | 275 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Entraîner avec un script
En plus des [notebooks](./notebooks) de ð€ Transformers, il existe également des exemples de scripts démontrant comment entraîner un modÚle pour une tâche avec [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) ou [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
Vous trouverez également des scripts que nous avons utilisé dans nos [projets de recherche](https://github.com/huggingface/transformers/tree/main/examples/research_projects) et des [exemples "legacy"](https://github.com/huggingface/transformers/tree/main/examples/legacy) qui sont des contributions de la communauté. Ces scripts ne sont pas activement maintenus et nécessitent une version spécifique de ð€ Transformers qui sera probablement incompatible avec la derniÚre version de la librairie.
Les exemples de scripts ne sont pas censés fonctionner immédiatement pour chaque problÚme, et il se peut que vous ayez besoin d'adapter le script au problÚme que vous essayez de résoudre. Pour vous aider dans cette tâche, la plupart des scripts exposent entiÚrement la maniÚre dont les données sont prétraitées, vous permettant de les modifier selon vos besoins.
Pour toute fonctionnalité que vous souhaitez implémenter dans un script d'exemple, veuillez en discuter sur le [forum](https://discuss.huggingface.co/) ou dans une [issue](https://github.com/huggingface/transformers/issues) avant de soumettre une Pull Request. Bien que nous acceptions les corrections de bugs, il est peu probable que nous fusionnions une Pull Request (opération "merge" dans Git) ajoutant plus de fonctionnalités au détriment de la lisibilité.
Ce guide vous montrera comment exécuter un script d'entraînement de résumé en exemple avec [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) et [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). Tous les exemples sont censés fonctionner avec les deux frameworks, sauf indication contraire.
## Configuration
Pour exécuter avec succÚs la derniÚre version des scripts d'exemple, vous devez **installer ð€ Transformers à partir du code source** dans un nouvel environnement virtuel :
```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```
Pour les versions plus anciennes des exemples de scripts, cliquez sur le bouton ci-dessous :
<details>
<summary>Exemples pour les anciennes versions de Transformers ð€</summary>
<ul>
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
</ul>
</details>
Ensuite, changez votre clone actuel de ð€ Transformers pour une version spécifique, comme par exemple v3.5.1 :
```bash
git checkout tags/v3.5.1
```
AprÚs avoir configuré la bonne version de la librairie, accédez au dossier d'exemple de votre choix et installez les prérequis spécifiques à l'exemple.
```bash
pip install -r requirements.txt
```
## Exécuter un script
<frameworkcontent>
<pt>
Le script d'exemple télécharge et prétraite un jeu de données à partir de la bibliothÚque ð€ [Datasets](https://huggingface.co/docs/datasets/). Ensuite, le script affine un ensemble de données à l'aide de [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) sur une architecture qui prend en charge la tâche de résumé. L'exemple suivant montre comment ajuster le modÚle [T5-small](https://huggingface.co/google-t5/t5-small) sur les données [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Le modÚle T5 nécessite un argument supplémentaire `source_prefix` en raison de la façon dont il a été entraîné. Cette invite permet à T5 de savoir qu'il s'agit d'une tâche de résumé.
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
Le script d'exemple télécharge et prétraite un jeu de données à partir de la bibliothÚque ð€ [Datasets](https://huggingface.co/docs/datasets/). Ensuite, le script ajuste un modÚle à l'aide de Keras sur une architecture qui prend en charge la tâche de résumé. L'exemple suivant montre comment ajuster le modÚle [T5-small](https://huggingface.co/google-t5/t5-small) sur le jeu de données [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Le modÚle T5 nécessite un argument supplémentaire source_prefix en raison de la façon dont il a été entraîné. Cette invite permet à T5 de savoir qu'il s'agit d'une tâche de résumé.
```bash
python examples/tensorflow/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Entraînement distribué et précision mixte
[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) prend en charge l'entraînement distribué et la précision mixte, ce qui signifie que vous pouvez également les utiliser dans un script. Pour activer ces deux fonctionnalités :
- Ajoutez l'argument fp16 pour activer la précision mixte.
- Définissez le nombre de GPU à utiliser avec l'argument `nproc_per_node`.
```bash
torchrun \
--nproc_per_node 8 pytorch/summarization/run_summarization.py \
--fp16 \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
Les scripts TensorFlow utilisent une Strategie en Miroir [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) pour l'entraînement distribué, et vous n'avez pas besoin d'ajouter d'arguments supplémentaires au script d'entraînement. Le script TensorFlow utilisera plusieurs GPU par défaut s'ils sont disponibles.
## Exécuter un script sur un TPU
<frameworkcontent>
<pt>
Les unités de traitement de tenseurs (UTT) (TPU) sont spécialement conçues pour accélérer les performances. PyTorch prend en charge les TPU avec le compilateur de deep learning [XLA](https://www.tensorflow.org/xla). Pour utiliser un TPU, lancez le script xla_spawn.py et utilisez l'argument num_cores pour définir le nombre de cÅurs TPU que vous souhaitez utilise
```bash
python xla_spawn.py --num_cores 8 \
summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
</pt>
<tf>
Les scripts TensorFlow utilisent une [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) pour l'entraînement sur TPU. Pour utiliser un TPU, passez le nom de la ressource TPU à l'argument tpu.
```bash
python run_summarization.py \
--tpu name_of_tpu_resource \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 16 \
--num_train_epochs 3 \
--do_train \
--do_eval
```
</tf>
</frameworkcontent>
## Exécuter un script avec ð€ Accelerate
ð€ [Accelerate](https://huggingface.co/docs/accelerate) est une bibliothÚque uniquement pour PyTorch qui offre une méthode unifiée pour entraîner un modÚle sur plusieurs types de configurations (CPU uniquement, plusieurs GPU, TPU) tout en maintenant une visibilité complÚte sur la boucle d'entraînement PyTorch. Assurez-vous que vous avez installé ð€ Accelerate si ce n'est pas déjà le cas.
> Note : Comme Accelerate est en développement rapide, la version git d'accelerate doit être installée pour exécuter les scripts.
```bash
pip install git+https://github.com/huggingface/accelerate
```
Au lieu du script `run_summarization.py`, vous devez utiliser le script `run_summarization_no_trainer.py`. Les scripts compatibles avec ð€ Accelerate auront un fichier `task_no_trainer.py` dans le dossier. Commencez par exécuter la commande suivante pour créer et enregistrer un fichier de configuration.
```bash
accelerate config
```
Testez votre configuration pour vous assurer qu'elle est correctement configurée :
```bash
accelerate test
```
Maintenant, vous êtes prêt à lancer l'entraînement :
```bash
accelerate launch run_summarization_no_trainer.py \
--model_name_or_path google-t5/t5-small \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir ~/tmp/tst-summarization
```
## Utiliser un jeu de données personnalisé
Le script de résumé prend en charge les jeux de données personnalisés tant qu'ils sont au format CSV ou JSON Line. Lorsque vous utilisez votre propre jeu de données, vous devez spécifier plusieurs arguments supplémentaires :
- `train_file` et `validation_file` spécifient le chemin vers vos fichiers d'entraînement et de validation.
- `text_column` est le texte d'entrée à résumer.
- `summary_column` est le texte cible à produire.
Un exemple de script de résumé utilisant un ensemble de données personnalisé ressemblerait à ceci :
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--train_file path_to_csv_or_jsonlines_file \
--validation_file path_to_csv_or_jsonlines_file \
--text_column text_column_name \
--summary_column summary_column_name \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--overwrite_output_dir \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--predict_with_generate
```
## Tester un script
Il est souvent judicieux d'exécuter votre script sur un plus petit nombre d'exemples de jeu de données pour s'assurer que tout fonctionne comme prévu avant de s'engager sur un jeu de données complet qui pourrait prendre des heures à traiter. Utilisez les arguments suivants pour tronquer le jeu de données à un nombre maximal d'échantillons :
- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`
```bash
python examples/pytorch/summarization/run_summarization.py \
--model_name_or_path google-t5/t5-small \
--max_train_samples 50 \
--max_eval_samples 50 \
--max_predict_samples 50 \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
```
Tous les scripts d'exemple ne prennent pas en charge l'argument `max_predict_samples`. Si vous n'êtes pas sûr que votre script prenne en charge cet argument, ajoutez l'argument `-h` pour vérifier.
```bash
examples/pytorch/summarization/run_summarization.py -h
```
## Reprendre l'entraînement à partir d'un point de contrÎle
Une autre option utile est de reprendre l'entraînement à partir d'un point de contrÎle précédent. Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu. Il existe deux méthodes pour reprendre l'entraînement à partir d'un point de contrÎle.
La premiÚre méthode utilise l'argument `output_dir previous_output_dir` pour reprendre l'entraînement à partir du dernier point de contrÎle stocké dans `output_dir`. Dans ce cas, vous devez supprimer l'argument `overwrite_output_dir`.
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--output_dir previous_output_dir \
--predict_with_generate
```
La seconde méthode utilise l'argument `resume_from_checkpoint path_to_specific_checkpoint` pour reprendre l'entraînement à partir d'un dossier de point de contrÎle spécifique.
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--resume_from_checkpoint path_to_specific_checkpoint \
--predict_with_generate
```
## Partage ton modÚle
Tous les scripts peuvent télécharger votre modÚle final sur le Model Hub. Assurez-vous que vous êtes connecté à Hugging Face avant de commencer :
```bash
huggingface-cli login
```
Ensuite, ajoutez l'argument `push_to_hub` au script. Cet argument créera un dépÎt avec votre nom d'utilisateur Hugging Face et le nom du dossier spécifié dans `output_dir`.
Pour donner un nom spécifique à votre dépÎt, utilisez l'argument `push_to_hub_model_id` pour l'ajouter. Le dépÎt sera automatiquement listé sous votre namespace.
L'exemple suivant montre comment télécharger un modÚle avec un nom de dépÎt spécifique :
```bash
python examples/pytorch/summarization/run_summarization.py
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--dataset_name cnn_dailymail \
--dataset_config "3.0.0" \
--source_prefix "summarize: " \
--push_to_hub \
--push_to_hub_model_id finetuned-t5-cnn_dailymail \
--output_dir /tmp/tst-summarization \
--per_device_train_batch_size=4 \
--per_device_eval_batch_size=4 \
--overwrite_output_dir \
--predict_with_generate
``` | transformers/docs/source/fr/run_scripts_fr.md/0 | {
"file_path": "transformers/docs/source/fr/run_scripts_fr.md",
"repo_id": "transformers",
"token_count": 7046
} | 276 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Preprocess
[[open-in-colab]]
Prima di poter usare i dati in un modello, bisogna processarli in un formato accettabile per quest'ultimo. Un modello non comprende il testo grezzo, le immagini o l'audio. Bisogna convertire questi input in numeri e assemblarli all'interno di tensori. In questa esercitazione, tu potrai:
* Preprocessare dati testuali con un tokenizer.
* Preprocessare immagini o dati audio con un estrattore di caratteristiche.
* Preprocessare dati per attività multimodali mediante un processore.
## NLP
<Youtube id="Yffk5aydLzg"/>
Lo strumento principale per processare dati testuali Ú un [tokenizer](main_classes/tokenizer). Un tokenizer inizia separando il testo in *tokens* secondo una serie di regole. I tokens sono convertiti in numeri, questi vengono utilizzati per costruire i tensori di input del modello. Anche altri input addizionali se richiesti dal modello vengono aggiunti dal tokenizer.
<Tip>
Se stai pensando si utilizzare un modello preaddestrato, Ú importante utilizzare il tokenizer preaddestrato associato. Questo assicura che il testo sia separato allo stesso modo che nel corpus usato per l'addestramento, e venga usata la stessa mappatura tokens-to-index (solitamente indicato come il *vocabolario*) come nel preaddestramento.
</Tip>
Iniziamo subito caricando un tokenizer preaddestrato con la classe [`AutoTokenizer`]. Questo scarica il *vocabolario* usato quando il modello Ú stato preaddestrato.
### Tokenize
Carica un tokenizer preaddestrato con [`AutoTokenizer.from_pretrained`]:
```py
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
```
Poi inserisci le tue frasi nel tokenizer:
```py
>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.")
>>> print(encoded_input)
{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
Il tokenizer restituisce un dizionario contenente tre oggetti importanti:
* [input_ids](glossary#input-ids) sono gli indici che corrispondono ad ogni token nella frase.
* [attention_mask](glossary#attention-mask) indicata se un token deve essere elaborato o no.
* [token_type_ids](glossary#token-type-ids) identifica a quale sequenza appartiene un token se Ú presente più di una sequenza.
Si possono decodificare gli `input_ids` per farsi restituire l'input originale:
```py
>>> tokenizer.decode(encoded_input["input_ids"])
'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]'
```
Come si può vedere, il tokenizer aggiunge due token speciali - `CLS` e `SEP` (classificatore e separatore) - alla frase. Non tutti i modelli hanno bisogno dei token speciali, ma se servono, il tokenizer li aggiungerà automaticamente.
Se ci sono più frasi che vuoi processare, passale come una lista al tokenizer:
```py
>>> batch_sentences = [
... "But what about second breakfast?",
... "Don't think he knows about second breakfast, Pip.",
... "What about elevensies?",
... ]
>>> encoded_inputs = tokenizer(batch_sentences)
>>> print(encoded_inputs)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102],
[101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
[101, 1327, 1164, 5450, 23434, 136, 102]],
'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]]}
```
### Pad
Questo Ú un argomento importante. Quando processi un insieme di frasi potrebbero non avere tutte la stessa lunghezza. Questo Ú un problema perchÚ i tensori, in input del modello, devono avere dimensioni uniformi. Il padding Ú una strategia per assicurarsi che i tensori siano rettangolari aggiungendo uno speciale *padding token* alle frasi più corte.
Imposta il parametro `padding` a `True` per imbottire le frasi più corte nel gruppo in modo che combacino con la massima lunghezza presente:
```py
>>> batch_sentences = [
... "But what about second breakfast?",
... "Don't think he knows about second breakfast, Pip.",
... "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
[101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
[101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```
Nota che il tokenizer aggiunge alle sequenze degli `0` perchÚ sono troppo corte!
### Truncation
L'altra faccia della medaglia Ú che avolte le sequenze possono essere troppo lunghe per essere gestite dal modello. In questo caso, avrai bisogno di troncare la sequenza per avere una lunghezza minore.
Imposta il parametro `truncation` a `True` per troncare una sequenza alla massima lunghezza accettata dal modello:
```py
>>> batch_sentences = [
... "But what about second breakfast?",
... "Don't think he knows about second breakfast, Pip.",
... "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
[101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
[101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```
### Costruire i tensori
Infine, vuoi che il tokenizer restituisca i tensori prodotti dal modello.
Imposta il parametro `return_tensors` su `pt` per PyTorch, o `tf` per TensorFlow:
```py
>>> batch_sentences = [
... "But what about second breakfast?",
... "Don't think he knows about second breakfast, Pip.",
... "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors="pt")
>>> print(encoded_input)
{'input_ids': tensor([[ 101, 153, 7719, 21490, 1122, 1114, 9582, 1623, 102],
[ 101, 5226, 1122, 9649, 1199, 2610, 1236, 102, 0]]),
'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]),
'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 0]])}
===PT-TF-SPLIT===
>>> batch_sentences = [
... "But what about second breakfast?",
... "Don't think he knows about second breakfast, Pip.",
... "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors="tf")
>>> print(encoded_input)
{'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[ 101, 153, 7719, 21490, 1122, 1114, 9582, 1623, 102],
[ 101, 5226, 1122, 9649, 1199, 2610, 1236, 102, 0]],
dtype=int32)>,
'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>,
'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 0]], dtype=int32)>}
```
## Audio
Gli input audio sono processati in modo differente rispetto al testo, ma l'obiettivo rimane lo stesso: creare sequenze numeriche che il modello può capire. Un [estrattore di caratteristiche](main_classes/feature_extractor) Ú progettato con lo scopo preciso di estrarre caratteristiche da immagini o dati audio grezzi e convertirli in tensori. Prima di iniziare, installa ð€ Datasets per caricare un dataset audio e sperimentare:
```bash
pip install datasets
```
Carica il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) (vedi il ð€ [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) per avere maggiori dettagli su come caricare un dataset):
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
```
Accedi al primo elemento della colonna `audio` per dare uno sguardo all'input. Richiamando la colonna `audio` sarà caricato automaticamente e ricampionato il file audio:
```py
>>> dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 8000}
```
Questo restituisce tre oggetti:
* `array` Ú il segnale vocale caricato - e potenzialmente ricampionato - come vettore 1D.
* `path` il percorso del file audio.
* `sampling_rate` si riferisce al numero di campioni del segnale vocale misurati al secondo.
### Ricampionamento
Per questo tutorial, puoi usare il modello [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base). Come puoi vedere dalla model card, il modello Wav2Vec2 Ú preaddestrato su un campionamento vocale a 16kHz.à importante che la frequenza di campionamento dei tuoi dati audio combaci con la frequenza di campionamento del dataset usato per preaddestrare il modello. Se la frequenza di campionamento dei tuoi dati non Ú uguale dovrai ricampionare i tuoi dati audio.
Per esempio, il dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) ha una frequenza di campionamento di 8000kHz. Utilizzando il modello Wav2Vec2 su questo dataset, alzala a 16kHz:
```py
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> dataset[0]["audio"]
{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414,
0. , 0. ], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 8000}
```
1. Usa il metodo di ð€ Datasets' [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.cast_column) per alzare la frequenza di campionamento a 16kHz:
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
```
2. Carica il file audio:
```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
Come puoi notare, la `sampling_rate` adesso Ú 16kHz!
### Feature extractor
Il prossimo passo Ú caricare un estrattore di caratteristiche per normalizzare e fare padding sull'input. Quando applichiamo il padding sui dati testuali, uno `0` Ú aggiunto alle sequenze più brevi. La stessa idea si applica ai dati audio, l'estrattore di caratteristiche per gli audio aggiungerà uno `0` - interpretato come silenzio - agli `array`.
Carica l'estrattore delle caratteristiche con [`AutoFeatureExtractor.from_pretrained`]:
```py
>>> from transformers import AutoFeatureExtractor
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```
Inserisci l' `array` audio nell'estrattore delle caratteristiche. Noi raccomandiamo sempre di aggiungere il parametro `sampling_rate` nell'estrattore delle caratteristiche per correggere meglio qualche errore, dovuto ai silenzi, che potrebbe verificarsi.
```py
>>> audio_input = [dataset[0]["audio"]["array"]]
>>> feature_extractor(audio_input, sampling_rate=16000)
{'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ...,
5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]}
```
### Pad e truncate
Come per il tokenizer, puoi applicare le operazioni padding o truncation per manipolare sequenze di variabili a lotti. Dai uno sguaro alla lunghezza delle sequenze di questi due campioni audio:
```py
>>> dataset[0]["audio"]["array"].shape
(173398,)
>>> dataset[1]["audio"]["array"].shape
(106496,)
```
Come puoi vedere, il primo campione ha una sequenza più lunga del secondo. Crea una funzione che preprocesserà il dataset. Specifica una lunghezza massima del campione, e l'estrattore di features si occuperà di riempire o troncare la sequenza per coincidervi:
```py
>>> def preprocess_function(examples):
... audio_arrays = [x["array"] for x in examples["audio"]]
... inputs = feature_extractor(
... audio_arrays,
... sampling_rate=16000,
... padding=True,
... max_length=100000,
... truncation=True,
... )
... return inputs
```
Applica la funzione ai primi esempi nel dataset:
```py
>>> processed_dataset = preprocess_function(dataset[:5])
```
Adesso guarda la lunghezza dei campioni elaborati:
```py
>>> processed_dataset["input_values"][0].shape
(100000,)
>>> processed_dataset["input_values"][1].shape
(100000,)
```
La lunghezza dei campioni adesso coincide con la massima lunghezza impostata nelle funzione.
## Vision
Un estrattore di caratteristiche si può usare anche per processare immagini e per compiti di visione. Ancora una volta, l'obiettivo Ú convertire l'immagine grezza in un lotto di tensori come input.
Carica il dataset [food101](https://huggingface.co/datasets/food101) per questa esercitazione. Usa il parametro `split` di ð€ Datasets per caricare solo un piccolo campione dal dataset di addestramento poichÚ il set di dati Ú molto grande:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("food101", split="train[:100]")
```
Secondo passo, dai uno sguardo alle immagini usando la caratteristica [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) di ð€ Datasets:
```py
>>> dataset[0]["image"]
```

### Feature extractor
Carica l'estrattore di caratteristiche [`AutoFeatureExtractor.from_pretrained`]:
```py
>>> from transformers import AutoFeatureExtractor
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
```
### Data augmentation
Per le attività di visione, Ú usuale aggiungere alcuni tipi di data augmentation alle immagini come parte del preprocessing. Puoi aggiungere augmentations con qualsiasi libreria che preferisci, ma in questa esercitazione, userai il modulo [`transforms`](https://pytorch.org/vision/stable/transforms.html) di torchvision.
1. Normalizza l'immagine e usa [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) per concatenare alcune trasformazioni - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) e [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - insieme:
```py
>>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor
>>> normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
>>> _transforms = Compose(
... [RandomResizedCrop(feature_extractor.size), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
... )
```
2. Il modello accetta [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) come input. Questo valore Ú generato dall'estrattore di caratteristiche. Crea una funzione che genera `pixel_values` dai transforms:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]]
... return examples
```
3. Poi utilizza ð€ Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)per applicare al volo la trasformazione:
```py
>>> dataset.set_transform(transforms)
```
4. Adesso quando accedi all'immagine, puoi notare che l'estrattore di caratteristiche ha aggiunto `pixel_values` allo schema di input:
```py
>>> dataset[0]["image"]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F1A7B0630D0>,
'label': 6,
'pixel_values': tensor([[[ 0.0353, 0.0745, 0.1216, ..., -0.9922, -0.9922, -0.9922],
[-0.0196, 0.0667, 0.1294, ..., -0.9765, -0.9843, -0.9922],
[ 0.0196, 0.0824, 0.1137, ..., -0.9765, -0.9686, -0.8667],
...,
[ 0.0275, 0.0745, 0.0510, ..., -0.1137, -0.1216, -0.0824],
[ 0.0667, 0.0824, 0.0667, ..., -0.0588, -0.0745, -0.0980],
[ 0.0353, 0.0353, 0.0431, ..., -0.0039, -0.0039, -0.0588]],
[[ 0.2078, 0.2471, 0.2863, ..., -0.9451, -0.9373, -0.9451],
[ 0.1608, 0.2471, 0.3098, ..., -0.9373, -0.9451, -0.9373],
[ 0.2078, 0.2706, 0.3020, ..., -0.9608, -0.9373, -0.8275],
...,
[-0.0353, 0.0118, -0.0039, ..., -0.2392, -0.2471, -0.2078],
[ 0.0196, 0.0353, 0.0196, ..., -0.1843, -0.2000, -0.2235],
[-0.0118, -0.0039, -0.0039, ..., -0.0980, -0.0980, -0.1529]],
[[ 0.3961, 0.4431, 0.4980, ..., -0.9216, -0.9137, -0.9216],
[ 0.3569, 0.4510, 0.5216, ..., -0.9059, -0.9137, -0.9137],
[ 0.4118, 0.4745, 0.5216, ..., -0.9137, -0.8902, -0.7804],
...,
[-0.2314, -0.1922, -0.2078, ..., -0.4196, -0.4275, -0.3882],
[-0.1843, -0.1686, -0.2000, ..., -0.3647, -0.3804, -0.4039],
[-0.1922, -0.1922, -0.1922, ..., -0.2941, -0.2863, -0.3412]]])}
```
Di seguito come si vede l'immagine dopo la fase di preprocessing. Come ci si aspetterebbe dalle trasformazioni applicate, l'immagine Ú stata ritagliata in modo casuale e le proprietà del colore sono diverse.
```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

## Multimodal
Per attività multimodali userai una combinazione di tutto quello che hai imparato poco fa e applicherai le tue competenze alla comprensione automatica del parlato (Automatic Speech Recognition - ASR). Questo significa che avrai bisogno di:
* Un estrattore delle caratteristiche per processare i dati audio.
* Il Tokenizer per processare i testi.
Ritorna sul datasere [LJ Speech](https://huggingface.co/datasets/lj_speech):
```py
>>> from datasets import load_dataset
>>> lj_speech = load_dataset("lj_speech", split="train")
```
Visto che sei interessato solo alle colonne `audio` e `text`, elimina tutte le altre:
```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```
Adesso guarda le colonne `audio` e `text`:
```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
'sampling_rate': 22050}
>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```
Ricorda dalla sezione precedente sull'elaborazione dei dati audio, tu dovresti sempre [ricampionare](preprocessing#audio) la frequenza di campionamento dei tuoi dati audio per farla coincidere con quella del dataset usato dal modello preaddestrato:
```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```
### Processor
Un processor combina un estrattore di caratteristiche e un tokenizer. Carica un processor con [`AutoProcessor.from_pretrained`]:
```py
>>> from transformers import AutoProcessor
>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```
1. Crea una funzione che processi i dati audio in `input_values`, e tokenizza il testo in `labels`. Questi sono i tuoi input per il modello:
```py
>>> def prepare_dataset(example):
... audio = example["audio"]
... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))
... return example
```
2. Applica la funzione `prepare_dataset` ad un campione:
```py
>>> prepare_dataset(lj_speech[0])
```
Nota che il processor ha aggiunto `input_values` e `labels`. La frequenza di campionamento Ú stata corretta riducendola a 16kHz.
Fantastico, ora dovresti essere in grado di preelaborare i dati per qualsiasi modalità e persino di combinare modalità diverse! Nella prossima esercitazione, impareremo a mettere a punto un modello sui dati appena pre-elaborati. | transformers/docs/source/it/preprocessing.md/0 | {
"file_path": "transformers/docs/source/it/preprocessing.md",
"repo_id": "transformers",
"token_count": 9562
} | 277 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Sharing custom models
ð€ Transformersã©ã€ãã©ãªã¯ãç°¡åã«æ¡åŒµã§ããããã«èšèšãããŠããŸãããã¹ãŠã®ã¢ãã«ã¯ãªããžããªã®ç¹å®ã®ãµããã©ã«ãã«å®å
šã«ã³ãŒãåãããŠãããæœè±¡åã¯ãããŸããããããã£ãŠãã¢ããªã³ã°ãã¡ã€ã«ãã³ããŒããŠèª¿æŽããããšãç°¡åã§ãã
æ°ããã¢ãã«ãæžããŠããå ŽåããŒãããå§ããæ¹ãç°¡åãããããŸããããã®ãã¥ãŒããªã¢ã«ã§ã¯ãã«ã¹ã¿ã ã¢ãã«ãšãã®èšå®ãã©ã®ããã«æžããTransformerså
ã§äœ¿çšã§ããããã«ããã³ãŒãã«äŸåããå
±åäœãšå
±æããæ¹æ³ã説æããŸããã©ã€ãã©ãªã«ååšããªãå Žåã§ãã誰ã§ã䜿çšã§ããããã«ããŸãã
ãããå®èšŒããããã«ã[timmã©ã€ãã©ãª](https://github.com/rwightman/pytorch-image-models)ã®ResNetã¯ã©ã¹ã[`PreTrainedModel`]ã«ã©ããããããšã«ãã£ãŠãResNetã¢ãã«ã䜿çšããŸãã
## Writing a custom configuration
ã¢ãã«ã«åãçµãåã«ããŸããã®èšå®ãæžããŸããããã¢ãã«ã®èšå®ã¯ãã¢ãã«ãæ§ç¯ããããã«å¿
èŠãªãã¹ãŠã®æ
å ±ãå«ããªããžã§ã¯ãã§ãã次ã®ã»ã¯ã·ã§ã³ã§èŠãããã«ãã¢ãã«ã¯åæåããããã«`config`ããåãåãããšãã§ããªãããããã®ãªããžã§ã¯ããã§ããã ãå®å
šã§ããå¿
èŠããããŸãã
ãã®äŸã§ã¯ãResNetã¯ã©ã¹ã®ããã€ãã®åŒæ°ãååŸãã調æŽããããããããªããšããŸããç°ãªãèšå®ã¯ãç°ãªãã¿ã€ãã®ResNetãæäŸããŸãããã®åŸããããã®åŒæ°ã確èªããåŸããããã®åŒæ°ãåã«æ ŒçŽããŸãã
```python
from transformers import PretrainedConfig
from typing import List
class ResnetConfig(PretrainedConfig):
model_type = "resnet"
def __init__(
self,
block_type="bottleneck",
layers: List[int] = [3, 4, 6, 3],
num_classes: int = 1000,
input_channels: int = 3,
cardinality: int = 1,
base_width: int = 64,
stem_width: int = 64,
stem_type: str = "",
avg_down: bool = False,
**kwargs,
):
if block_type not in ["basic", "bottleneck"]:
raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.")
if stem_type not in ["", "deep", "deep-tiered"]:
raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")
self.block_type = block_type
self.layers = layers
self.num_classes = num_classes
self.input_channels = input_channels
self.cardinality = cardinality
self.base_width = base_width
self.stem_width = stem_width
self.stem_type = stem_type
self.avg_down = avg_down
super().__init__(**kwargs)
```
éèŠãªããšã3ã€èŠããŠããã¹ããã€ã³ãã¯æ¬¡ã®ãšããã§ãïŒ
- `PretrainedConfig` ãç¶æ¿ããå¿
èŠããããŸãã
- ããªãã® `PretrainedConfig` ã® `__init__` ã¯ä»»æã® kwargs ãåãå
¥ããå¿
èŠããããŸãã
- ãããã® `kwargs` ã¯èŠªã¯ã©ã¹ã® `__init__` ã«æž¡ãå¿
èŠããããŸãã
ç¶æ¿ã¯ãð€ Transformers ã©ã€ãã©ãªã®ãã¹ãŠã®æ©èœãååŸã§ããããã«ããããã§ããä»ã®2ã€ã®å¶çŽã¯ã
`PretrainedConfig` ãèšå®ããŠãããã£ãŒã«ã以å€ã«ãå€ãã®ãã£ãŒã«ããæã£ãŠããããšããæ¥ãŠããŸãã
`from_pretrained` ã¡ãœããã§èšå®ãåããŒãããå Žåããããã®ãã£ãŒã«ãã¯ããªãã®èšå®ã«åãå
¥ãããã
ãã®åŸã芪ã¯ã©ã¹ã«éä¿¡ãããå¿
èŠããããŸãã
èšå®ã® `model_type` ãå®çŸ©ããããšïŒããã§ã¯ `model_type="resnet"`ïŒã¯ã
èªåã¯ã©ã¹ã«ã¢ãã«ãç»é²ãããå Žåãé€ããŠã¯å¿
é ã§ã¯ãããŸããïŒæåŸã®ã»ã¯ã·ã§ã³ãåç
§ïŒã
ããã§ãã©ã€ãã©ãªã®ä»ã®ã¢ãã«èšå®ãšåæ§ã«ãèšå®ãç°¡åã«äœæããŠä¿åã§ããŸãã
以äžã¯ãresnet50d èšå®ãäœæããŠä¿åããæ¹æ³ã®äŸã§ãïŒ
```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d_config.save_pretrained("custom-resnet")
```
ããã«ããã`custom-resnet` ãã©ã«ãå
ã« `config.json` ãšããååã®ãã¡ã€ã«ãä¿åãããŸãããã®åŸã`from_pretrained` ã¡ãœããã䜿çšããŠæ§æãåããŒãã§ããŸãã
```py
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
```
ãŸãã[`PretrainedConfig`] ã¯ã©ã¹ã®ä»ã®ã¡ãœããã䜿çšããããšãã§ããŸããããšãã°ã[`~PretrainedConfig.push_to_hub`] ã䜿çšããŠãèšå®ãçŽæ¥ Hub ã«ã¢ããããŒãã§ããŸãã
## Writing a custom model
ResNet ã®èšå®ãã§ããã®ã§ãã¢ãã«ãæžãå§ããããšãã§ããŸããå®éã«ã¯2ã€ã®ã¢ãã«ãæžããŸãã1ã€ã¯ãããã®ç»åããé ããç¹åŸŽãæœåºããã¢ãã«ïŒ[`BertModel`] ã®ãããªãã®ïŒã§ããã1ã€ã¯ç»ååé¡ã«é©ããã¢ãã«ïŒ[`BertForSequenceClassification`] ã®ãããªãã®ïŒã§ãã
åè¿°ããããã«ããã®äŸãã·ã³ãã«ã«ä¿ã€ããã«ãã¢ãã«ã®ç·©ãã©ãããŒã®ã¿ãæžããŸãããã®ã¯ã©ã¹ãæžãåã«è¡ãå¿
èŠãããå¯äžã®ããšã¯ããããã¯ã¿ã€ããšå®éã®ãããã¯ã¯ã©ã¹ã®éã®ãããã§ãããã®åŸããã¹ãŠã `ResNet` ã¯ã©ã¹ã«æž¡ããŠèšå®ããã¢ãã«ãå®çŸ©ããŸãïŒ
```py
from transformers import PreTrainedModel
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from .configuration_resnet import ResnetConfig
BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}
class ResnetModel(PreTrainedModel):
config_class = ResnetConfig
def __init__(self, config):
super().__init__(config)
block_layer = BLOCK_MAPPING[config.block_type]
self.model = ResNet(
block_layer,
config.layers,
num_classes=config.num_classes,
in_chans=config.input_channels,
cardinality=config.cardinality,
base_width=config.base_width,
stem_width=config.stem_width,
stem_type=config.stem_type,
avg_down=config.avg_down,
)
def forward(self, tensor):
return self.model.forward_features(tensor)
```
ç»åãåé¡ããã¢ãã«ã®å Žåãforwardã¡ãœãããå€æŽããã ãã§ãïŒ
```py
import torch
class ResnetModelForImageClassification(PreTrainedModel):
config_class = ResnetConfig
def __init__(self, config):
super().__init__(config)
block_layer = BLOCK_MAPPING[config.block_type]
self.model = ResNet(
block_layer,
config.layers,
num_classes=config.num_classes,
in_chans=config.input_channels,
cardinality=config.cardinality,
base_width=config.base_width,
stem_width=config.stem_width,
stem_type=config.stem_type,
avg_down=config.avg_down,
)
def forward(self, tensor, labels=None):
logits = self.model(tensor)
if labels is not None:
loss = torch.nn.functional.cross_entropy(logits, labels)
return {"loss": loss, "logits": logits}
return {"logits": logits}
```
äž¡æ¹ã®å Žåã`PreTrainedModel`ããç¶æ¿ãã`config`ã䜿çšããŠã¹ãŒããŒã¯ã©ã¹ã®åæåãåŒã³åºããŸãïŒéåžžã®`torch.nn.Module`ãæžããšãã®ãããªæãã§ãïŒã
`config_class`ãèšå®ããè¡ã¯å¿
é ã§ã¯ãããŸããããïŒæåŸã®ã»ã¯ã·ã§ã³ãåç
§ïŒãã¢ãã«ãèªåã¯ã©ã¹ã«ç»é²ãããå Žåã«äœ¿çšã§ããŸãã
<Tip>
ã¢ãã«ãã©ã€ãã©ãªå
ã®ã¢ãã«ãšéåžžã«äŒŒãŠããå Žåããã®ã¢ãã«ãšåãæ§æãåå©çšã§ããŸãã
</Tip>
ã¢ãã«ãè¿ãå
容ã¯äœã§ãæ§ããŸããããã©ãã«ãæž¡ããããšãã«æ倱ãå«ãèŸæžãè¿ãïŒ`ResnetModelForImageClassification`ã®ããã«è¡ã£ããã®ïŒãšã
ã¢ãã«ã[`Trainer`]ã¯ã©ã¹å
ã§çŽæ¥äœ¿çšã§ããããã«ãªããŸããç¬èªã®ãã¬ãŒãã³ã°ã«ãŒããŸãã¯ä»ã®ã©ã€ãã©ãªã䜿çšããäºå®ã§ããéãã
å¥ã®åºå圢åŒã䜿çšããããšãåé¡ãããŸããã
ããŠãã¢ãã«ã¯ã©ã¹ãã§ããã®ã§ã1ã€äœæããŸãããïŒ
```py
resnet50d = ResnetModelForImageClassification(resnet50d_config)
```
å床ã[`PreTrainedModel`]ã®ããããã®ã¡ãœãããäŸãã°[`~PreTrainedModel.save_pretrained`]ã
[`~PreTrainedModel.push_to_hub`]ãªã©ã䜿çšã§ããŸãã次ã®ã»ã¯ã·ã§ã³ã§ã¯ãã¢ãã«ã®éã¿ãã³ãŒããšäžç·ã«
Hugging Face Hub ã«ããã·ã¥ããæ¹æ³ãèŠãŠã¿ãŸãã
ãããããŸãã¯ã¢ãã«å
ã«äºååŠç¿æžã¿ã®éã¿ãããŒãããŸãããã
ç¬èªã®ãŠãŒã¹ã±ãŒã¹ã§ã¯ãããããç¬èªã®ããŒã¿ã§ã«ã¹ã¿ã ã¢ãã«ããã¬ãŒãã³ã°ããããšã«ãªãã§ãããã
ãã®ãã¥ãŒããªã¢ã«ã§ã¯ã¹ããŒãã¢ããã®ããã«ãresnet50dã®äºååŠç¿æžã¿ããŒãžã§ã³ã䜿çšããŸãã
ç§ãã¡ã®ã¢ãã«ã¯ãããã©ããããã ããªã®ã§ããããã®éã¿ã転éããã®ã¯ç°¡åã§ãïŒ
```py
import timm
pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```
ããŠã[`~PreTrainedModel.save_pretrained`]ãŸãã¯[`~PreTrainedModel.push_to_hub`]ãå®è¡ãããšãã«ã
ã¢ãã«ã®ã³ãŒããä¿åãããããã«ããæ¹æ³ãèŠãŠã¿ãŸãããã
## Sending the code to the Hub
<Tip warning={true}>
ãã®APIã¯å®éšçã§ããã次ã®ãªãªãŒã¹ã§ããããªå€æŽããããããããŸããã
</Tip>
ãŸããã¢ãã«ã`.py`ãã¡ã€ã«ã«å®å
šã«å®çŸ©ãããŠããããšã確èªããŠãã ããã
ãã¡ã€ã«ã¯çžå¯Ÿã€ã³ããŒããä»ã®ãã¡ã€ã«ã«äŸåã§ããŸããããã¹ãŠã®ãã¡ã€ã«ãåããã£ã¬ã¯ããªã«ããéãïŒãŸã ãã®æ©èœã§ã¯ãµãã¢ãžã¥ãŒã«ã¯ãµããŒãããŠããŸããïŒãåé¡ãããŸããã
ãã®äŸã§ã¯ãçŸåšã®äœæ¥ãã£ã¬ã¯ããªå
ã«ååããresnet_modelãã®ãã©ã«ããäœæãããã®äžã«`modeling_resnet.py`ãã¡ã€ã«ãš`configuration_resnet.py`ãã¡ã€ã«ãå®çŸ©ããŸãã
æ§æãã¡ã€ã«ã«ã¯`ResnetConfig`ã®ã³ãŒããå«ãŸããã¢ããªã³ã°ãã¡ã€ã«ã«ã¯`ResnetModel`ãš`ResnetModelForImageClassification`ã®ã³ãŒããå«ãŸããŠããŸãã
```
.
âââ resnet_model
âââ __init__.py
âââ configuration_resnet.py
âââ modeling_resnet.py
```
`__init__.py`ã¯ç©ºã§ãã£ãŠãåé¡ãããŸãããPythonã`resnet_model`ãã¢ãžã¥ãŒã«ãšããŠæ€åºã§ããããã«ããããã«ååšããŸãã
<Tip warning={true}>
ã©ã€ãã©ãªããã¢ããªã³ã°ãã¡ã€ã«ãã³ããŒããå Žåããã¡ã€ã«ã®å
é ã«ãããã¹ãŠã®çžå¯Ÿã€ã³ããŒãã`transformers`ããã±ãŒãžããã€ã³ããŒãã«çœ®ãæããå¿
èŠããããŸãã
</Tip>
æ¢åã®èšå®ãã¢ãã«ãåå©çšïŒãŸãã¯ãµãã¯ã©ã¹åïŒã§ããããšã«æ³šæããŠãã ããã
ã³ãã¥ããã£ãšã¢ãã«ãå
±æããããã«ã次ã®æé ã«åŸã£ãŠãã ããïŒãŸããæ°ããäœæãããã¡ã€ã«ããResNetã¢ãã«ãšèšå®ãã€ã³ããŒãããŸãïŒ
```py
from resnet_model.configuration_resnet import ResnetConfig
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
```
次ã«ã`save_pretrained`ã¡ãœããã䜿çšããŠãããã®ãªããžã§ã¯ãã®ã³ãŒããã¡ã€ã«ãã³ããŒããç¹å®ã®Autoã¯ã©ã¹ïŒç¹ã«ã¢ãã«ã®å ŽåïŒã«æ£ããç»é²ããããã©ã€ãã©ãªã«æ瀺ããå¿
èŠããããŸãã次ã®ããã«å®è¡ããŸãïŒ
```py
ResnetConfig.register_for_auto_class()
ResnetModel.register_for_auto_class("AutoModel")
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
```
泚æ: èšå®ã«ã€ããŠã¯èªåã¯ã©ã¹ãæå®ããå¿
èŠã¯ãããŸããïŒèšå®çšã®èªåã¯ã©ã¹ã¯1ã€ãããªãã[`AutoConfig`]ã§ãïŒãã
ã¢ãã«ã«ã€ããŠã¯ç°ãªããŸããã«ã¹ã¿ã ã¢ãã«ã¯å€ãã®ç°ãªãã¿ã¹ã¯ã«é©ããŠããå¯èœæ§ãããããã
ã¢ãã«ãæ£ç¢ºãªèªåã¯ã©ã¹ã®ãã¡ã©ãã«é©ããŠããããæå®ããå¿
èŠããããŸãã
次ã«ãåè¿°ã®ããã«èšå®ãšã¢ãã«ãäœæããŸãããïŒ
```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = ResnetModelForImageClassification(resnet50d_config)
pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```
ã¢ãã«ãHubã«éä¿¡ããã«ã¯ããã°ã€ã³ããŠããããšã確èªããŠãã ãããã¿ãŒããã«ã§æ¬¡ã®ã³ãã³ããå®è¡ããŸãïŒ
```bash
huggingface-cli login
```
ãŸãã¯ããŒãããã¯ããïŒ
```py
from huggingface_hub import notebook_login
notebook_login()
```
次ã«ã次ã®ããã«ããŠãç¬èªã®åå空éã«ããã·ã¥ã§ããŸãïŒãŸãã¯ãã¡ã³ããŒã§ããçµç¹ã«ããã·ã¥ã§ããŸãïŒïŒ
```py
resnet50d.push_to_hub("custom-resnet50d")
```
ã¢ããªã³ã°ã®éã¿ãšJSON圢åŒã®æ§æã«å ããŠããã®ãã©ã«ããŒãcustom-resnet50dãå
ã®ã¢ããªã³ã°ããã³æ§æã.pyããã¡ã€ã«ãã³ããŒãããçµæã¯Hubã«ã¢ããããŒããããŸãããçµæã¯ãã®[model repo](https://huggingface.co/sgugger/custom-resnet50d)ã§ç¢ºèªã§ããŸãã
詳现ã«ã€ããŠã¯ã[Hubãžã®ããã·ã¥æ¹æ³](model_sharing)ãåç
§ããŠãã ããã
## Using a model with custom code
èªåã¯ã©ã¹ãš `from_pretrained` ã¡ãœããã䜿çšããŠããªããžããªå
ã®ã«ã¹ã¿ã ã³ãŒããã¡ã€ã«ãšå
±ã«ä»»æã®æ§æãã¢ãã«ããŸãã¯ããŒã¯ãã€ã¶ã䜿çšã§ããŸãã Hubã«ã¢ããããŒãããããã¹ãŠã®ãã¡ã€ã«ãšã³ãŒãã¯ãã«ãŠã§ã¢ã®ã¹ãã£ã³ãå®æœãããŸãïŒè©³çŽ°ã¯[Hubã»ãã¥ãªãã£](https://huggingface.co/docs/hub/security#malware-scanning)ããã¥ã¡ã³ããŒã·ã§ã³ãåç
§ããŠãã ããïŒãããããäŸç¶ãšããŠæªæã®ããã³ãŒããå®è¡ããªãããã«ãã¢ãã«ã³ãŒããšäœè
ã確èªããå¿
èŠããããŸãã
`trust_remote_code=True` ãèšå®ããŠã«ã¹ã¿ã ã³ãŒããæã€ã¢ãã«ã䜿çšã§ããŸãïŒ
```py
from transformers import AutoModelForImageClassification
model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
```
ã³ãããããã·ã¥ããrevisionããšããŠæž¡ãããšã匷ãæšå¥šãããŠããŸããããã«ãããã¢ãã«ã®äœè
ãã³ãŒããæªæã®ããæ°ããè¡ã§æŽæ°ããªãã£ãããšã確èªã§ããŸãïŒã¢ãã«ã®äœè
ãå®å
šã«ä¿¡é ŒããŠããå Žåãé€ããŸãïŒã
```py
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
model = AutoModelForImageClassification.from_pretrained(
"sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
)
```
ã¢ãã«ãªããžããªã®ã³ãããå±¥æŽããã©ãŠãžã³ã°ããéã«ã¯ãä»»æã®ã³ãããã®ã³ãããããã·ã¥ãç°¡åã«ã³ããŒã§ãããã¿ã³ããããŸãã
## Registering a model with custom code to the auto classes
ð€ Transformersãæ¡åŒµããã©ã€ãã©ãªãäœæããŠããå Žåãç¬èªã®ã¢ãã«ãå«ããããã«èªåã¯ã©ã¹ãæ¡åŒµãããå ŽåããããŸãã
ããã¯ã³ãŒããHubã«ããã·ã¥ããããšãšã¯ç°ãªãããŠãŒã¶ãŒã¯ã«ã¹ã¿ã ã¢ãã«ãååŸããããã«ããªãã®ã©ã€ãã©ãªãã€ã³ããŒãããå¿
èŠããããŸã
ïŒHubããã¢ãã«ã³ãŒããèªåçã«ããŠã³ããŒãããã®ãšã¯å¯Ÿç
§çã§ãïŒã
æ§æã«æ¢åã®ã¢ãã«ã¿ã€ããšç°ãªã `model_type` å±æ§ãããéãããŸãããªãã®ã¢ãã«ã¯ã©ã¹ãé©å㪠`config_class` å±æ§ãæã£ãŠããéãã
次ã®ããã«ããããèªåã¯ã©ã¹ã«è¿œå ã§ããŸãïŒ
```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification
AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```
泚æ: `AutoConfig` ã«ã«ã¹ã¿ã èšå®ãç»é²ããéã®æåã®åŒæ°ã¯ãã«ã¹ã¿ã èšå®ã® `model_type` ãšäžèŽããå¿
èŠããããŸãã
ãŸããä»»æã®èªåã¢ãã«ã¯ã©ã¹ã«ã«ã¹ã¿ã ã¢ãã«ãç»é²ããéã®æåã®åŒæ°ã¯ããããã®ã¢ãã«ã® `config_class` ãšäžèŽããå¿
èŠããããŸãã
| transformers/docs/source/ja/custom_models.md/0 | {
"file_path": "transformers/docs/source/ja/custom_models.md",
"repo_id": "transformers",
"token_count": 7503
} | 278 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
â ïž Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Processors
Transformers ã©ã€ãã©ãªã§ã¯ãããã»ããµã¯ 2 ã€ã®ç°ãªãæå³ãæã¡ãŸãã
- [Wav2Vec2](../model_doc/wav2vec2) ãªã©ã®ãã«ãã¢ãŒãã« ã¢ãã«ã®å
¥åãååŠçãããªããžã§ã¯ã (é³å£°ãšããã¹ã)
ãŸã㯠[CLIP](../model_doc/clip) (ããã¹ããšããžã§ã³)
- å€ãããŒãžã§ã³ã®ã©ã€ãã©ãªã§ GLUE ãŸã㯠SQUAD ã®ããŒã¿ãååŠçããããã«äœ¿çšãããŠãããªããžã§ã¯ãã¯éæšå¥šã«ãªããŸããã
## Multi-modal processors
ãã«ãã¢ãŒãã« ã¢ãã«ã§ã¯ããªããžã§ã¯ããè€æ°ã®ã¢ããªã㣠(ããã¹ãã
èŠèŠãšé³å£°ïŒãããã¯ã2 ã€ä»¥äžã®åŠçãªããžã§ã¯ããã°ã«ãŒãåããããã»ããµãŒãšåŒã°ãããªããžã§ã¯ãã«ãã£ãŠåŠçãããŸãã
ããŒã¯ãã€ã¶ãŒ (ããã¹ã ã¢ããªãã£çš)ãç»åããã»ããµãŒ (èŠèŠçš)ãç¹åŸŽæœåºåš (ãªãŒãã£ãªçš) ãªã©ã
ãããã®ããã»ããµã¯ãä¿åããã³ããŒãæ©èœãå®è£
ãã次ã®åºæ¬ã¯ã©ã¹ãç¶æ¿ããŸãã
[[autodoc]] ProcessorMixin
## Deprecated processors
ãã¹ãŠã®ããã»ããµã¯ãåãã¢ãŒããã¯ãã£ã«åŸã£ãŠããŸãã
[`~data.processors.utils.DataProcessor`]ãããã»ããµã¯æ¬¡ã®ãªã¹ããè¿ããŸãã
[`~data.processors.utils.InputExample`]ãããã
[`~data.processors.utils.InputExample`] ã¯æ¬¡ã®ããã«å€æã§ããŸãã
[`~data.processors.utils.Input features`] ãã¢ãã«ã«ãã£ãŒãããŸãã
[[autodoc]] data.processors.utils.DataProcessor
[[autodoc]] data.processors.utils.InputExample
[[autodoc]] data.processors.utils.InputFeatures
## GLUE
[äžè¬èšèªç解è©äŸ¡ (GLUE)](https://gluebenchmark.com/) ã¯ã
æ¢åã® NLU ã¿ã¹ã¯ã®å€æ§ãªã»ããã«ãããã¢ãã«ã®ããã©ãŒãã³ã¹ãçŽãšåæçºå£²ããã [GLUE: A
èªç¶èšèªç解ã®ããã®ãã«ãã¿ã¹ã¯ãã³ãããŒã¯ããã³åæãã©ãããã©ãŒã ](https://openreview.net/pdf?id=rJ4km2R5t7)
ãã®ã©ã€ãã©ãªã¯ãMRPCãMNLIãMNLI (äžäžèŽ)ãCoLAãSST2ãSTSBã
QQPãQNLIãRTEãWNLIã
ãããã®ããã»ããµã¯æ¬¡ã®ãšããã§ãã
- [`~data.processors.utils.MrpcProcessor`]
- [`~data.processors.utils.MnliProcessor`]
- [`~data.processors.utils.MnliMismatchedProcessor`]
- [`~data.processors.utils.Sst2Processor`]
- [`~data.processors.utils.StsbProcessor`]
- [`~data.processors.utils.QqpProcessor`]
- [`~data.processors.utils.QnliProcessor`]
- [`~data.processors.utils.RteProcessor`]
- [`~data.processors.utils.WnliProcessor`]
ããã«ã次ã®ã¡ãœããã䜿çšããŠãããŒã¿ ãã¡ã€ã«ããå€ãããŒããããããããªã¹ãã«å€æããããšãã§ããŸãã
[`~data.processors.utils.InputExample`]ã
[[autodoc]] data.processors.glue.glue_convert_examples_to_features
## XNLI
[ã¯ãã¹ãªã³ã¬ã« NLI ã³ãŒãã¹ (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) ã¯ã
èšèªãè¶
ããããã¹ãè¡šçŸã®å質ã XNLI ã¯ã[*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/) ã«åºã¥ãã¯ã©ãŠããœãŒã¹ã®ããŒã¿ã»ããã§ããããã¹ãã®ãã¢ã«ã¯ã15 åã®ããã¹ãå«æã¢ãããŒã·ã§ã³ãã©ãã«ä»ããããŠããŸãã
ããŸããŸãªèšèª (è±èªãªã©ã®é«ãªãœãŒã¹èšèªãšã¹ã¯ããªèªãªã©ã®äœãªãœãŒã¹èšèªã®äž¡æ¹ãå«ã)ã
è«æ [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053) ãšåæã«ãªãªãŒã¹ãããŸããã
ãã®ã©ã€ãã©ãªã¯ãXNLI ããŒã¿ãããŒãããããã»ããµããã¹ãããŸãã
- [`~data.processors.utils.XnliProcessor`]
ãã¹ãã»ããã«ã¯ãŽãŒã«ãã©ãã«ãä»ããŠãããããè©äŸ¡ã¯ãã¹ãã»ããã§è¡ãããŸãã®ã§ãäºæ¿ãã ããã
ãããã®ããã»ããµã䜿çšããäŸã¯ã[run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) ã¹ã¯ãªããã«ç€ºãããŠããŸãã
## SQuAD
[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) ã¯ã次ã®ãã³ãããŒã¯ã§ãã
質åå¿çã«é¢ããã¢ãã«ã®ããã©ãŒãã³ã¹ãè©äŸ¡ããŸãã v1.1 ãš v2.0 ã® 2 ã€ã®ããŒãžã§ã³ãå©çšå¯èœã§ããæåã®ããŒãžã§ã³
(v1.1) ã¯ãè«æ [SQuAD: 100,000+ question for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250) ãšãšãã«ãªãªãŒã¹ãããŸããã 2 çªç®ã®ããŒãžã§ã³ (v2.0) ã¯ãè«æ [Know What You Don't ãšåæã«ãªãªãŒã¹ãããŸããã
ç¥ã£ãŠããã¹ã: SQuAD ã®çããããªã質å](https://arxiv.org/abs/1806.03822)ã
ãã®ã©ã€ãã©ãªã¯ã次㮠2 ã€ã®ããŒãžã§ã³ã®ããããã®ããã»ããµããã¹ãããŸãã
### Processors
ãããã®ããã»ããµã¯æ¬¡ã®ãšããã§ãã
- [`~data.processors.utils.SquadV1Processor`]
- [`~data.processors.utils.SquadV2Processor`]
ã©ã¡ããæœè±¡ã¯ã©ã¹ [`~data.processors.utils.SquadProcessor`] ãç¶æ¿ããŠããŸãã
[[autodoc]] data.processors.squad.SquadProcessor
- all
ããã«ã次ã®ã¡ãœããã䜿çšããŠãSQuAD ã®äŸã次ã®åœ¢åŒã«å€æã§ããŸãã
ã¢ãã«ã®å
¥åãšããŠäœ¿çšã§ãã [`~data.processors.utils.SquadFeatures`]ã
[[autodoc]] data.processors.squad.squad_convert_examples_to_features
ãããã®ããã»ããµãšåè¿°ã®æ¹æ³ã¯ãããŒã¿ãå«ããã¡ã€ã«ã ãã§ãªãã
*tensorflow_datasets* ããã±ãŒãžã以äžã«äŸã瀺ããŸãã
### Example usage
以äžã«ããã»ããµã䜿çšããäŸãšãããŒã¿ ãã¡ã€ã«ã䜿çšããå€ææ¹æ³ã瀺ããŸãã
```python
# Loading a V2 processor
processor = SquadV2Processor()
examples = processor.get_dev_examples(squad_v2_data_dir)
# Loading a V1 processor
processor = SquadV1Processor()
examples = processor.get_dev_examples(squad_v1_data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=args.doc_stride,
max_query_length=max_query_length,
is_training=not evaluate,
)
```
*tensorflow_datasets* ã®äœ¿çšã¯ãããŒã¿ ãã¡ã€ã«ã䜿çšããã®ãšåããããç°¡åã§ãã
```python
# tensorflow_datasets only handle Squad V1.
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=args.doc_stride,
max_query_length=max_query_length,
is_training=not evaluate,
)
```
ãããã®ããã»ããµã䜿çšããå¥ã®äŸã¯ã[run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) ã¹ã¯ãªããã«ç€ºãããŠããŸãã
| transformers/docs/source/ja/main_classes/processors.md/0 | {
"file_path": "transformers/docs/source/ja/main_classes/processors.md",
"repo_id": "transformers",
"token_count": 3103
} | 279 |
Subsets and Splits