ZTWHHH committed (verified)
Commit febb5fa · Parent: 28d7d30

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50):
  1. .gitattributes +1 -0
  2. janus/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc +0 -0
  3. janus/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py +57 -0
  4. janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc +0 -0
  6. janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc +0 -0
  7. janus/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py +242 -0
  8. janus/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py +0 -0
  9. janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc +0 -0
  10. janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc +0 -0
  11. janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc +0 -0
  12. janus/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py +138 -0
  13. janus/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py +1323 -0
  14. janus/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py +112 -0
  15. janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc +0 -0
  16. janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc +0 -0
  17. janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc +0 -0
  18. janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc +0 -0
  19. janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc +0 -0
  20. janus/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py +237 -0
  21. janus/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py +93 -0
  22. janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc +0 -0
  23. janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +946 -0
  24. janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +391 -0
  25. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc +0 -0
  26. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc +0 -0
  27. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc +0 -0
  28. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc +0 -0
  29. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc +0 -0
  30. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc +0 -0
  31. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/image_processing_instructblipvideo.py +348 -0
  32. janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/processing_instructblipvideo.py +236 -0
  33. janus/lib/python3.10/site-packages/transformers/models/lilt/__init__.py +27 -0
  34. janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/__init__.cpython-310.pyc +0 -0
  35. janus/lib/python3.10/site-packages/transformers/models/lilt/configuration_lilt.py +131 -0
  36. janus/lib/python3.10/site-packages/transformers/models/lilt/modeling_lilt.py +1192 -0
  37. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc +0 -0
  38. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc +0 -0
  39. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc +0 -0
  40. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc +0 -0
  41. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc +0 -0
  42. janus/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc +3 -0
  43. janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/__init__.cpython-310.pyc +0 -0
  44. janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/configuration_persimmon.cpython-310.pyc +0 -0
  45. janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/modeling_persimmon.cpython-310.pyc +0 -0
  46. janus/lib/python3.10/site-packages/transformers/models/persimmon/configuration_persimmon.py +176 -0
  47. janus/lib/python3.10/site-packages/transformers/models/persimmon/modeling_persimmon.py +1128 -0
  48. janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/__init__.cpython-310.pyc +0 -0
  49. janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/configuration_pixtral.cpython-310.pyc +0 -0
  50. janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/image_processing_pixtral.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -441,3 +441,4 @@ janus/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
 janus/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
 janus/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 janus/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+janus/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
janus/lib/python3.10/site-packages/transformers/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.8 kB).
 
janus/lib/python3.10/site-packages/transformers/models/autoformer/__init__.py ADDED
@@ -0,0 +1,57 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+# rely on isort to merge the imports
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+    "configuration_autoformer": ["AutoformerConfig"],
+}
+
+try:
+    if not is_torch_available():
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    pass
+else:
+    _import_structure["modeling_autoformer"] = [
+        "AutoformerForPrediction",
+        "AutoformerModel",
+        "AutoformerPreTrainedModel",
+    ]
+
+
+if TYPE_CHECKING:
+    from .configuration_autoformer import (
+        AutoformerConfig,
+    )
+
+    try:
+        if not is_torch_available():
+            raise OptionalDependencyNotAvailable()
+    except OptionalDependencyNotAvailable:
+        pass
+    else:
+        from .modeling_autoformer import (
+            AutoformerForPrediction,
+            AutoformerModel,
+            AutoformerPreTrainedModel,
+        )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
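
Side note on the lazy-import pattern used by this `__init__.py` (not part of the commit itself): the module object is swapped for a `_LazyModule`, so `modeling_autoformer` is only imported the first time one of its exported names is accessed. A minimal sketch, assuming `torch` and `transformers` are installed in the environment being vendored here:

```python
# Sketch only, not part of this diff: the lazy module resolves symbols on first access.
import transformers.models.autoformer as autoformer_pkg

config = autoformer_pkg.AutoformerConfig(prediction_length=24)
print(config.context_length)  # 24 - context_length falls back to prediction_length

# Accessing AutoformerModel here is what triggers the import of modeling_autoformer.
print(autoformer_pkg.AutoformerModel.__name__)
```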
janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (822 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/configuration_autoformer.cpython-310.pyc ADDED
Binary file (10.2 kB).
 
janus/lib/python3.10/site-packages/transformers/models/autoformer/__pycache__/modeling_autoformer.cpython-310.pyc ADDED
Binary file (79.4 kB).
 
janus/lib/python3.10/site-packages/transformers/models/autoformer/configuration_autoformer.py ADDED
@@ -0,0 +1,242 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Autoformer model configuration"""
+
+from typing import List, Optional
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class AutoformerConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of an [`AutoformerModel`]. It is used to instantiate an
+    Autoformer model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the Autoformer
+    [huggingface/autoformer-tourism-monthly](https://huggingface.co/huggingface/autoformer-tourism-monthly)
+    architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        prediction_length (`int`):
+            The prediction length for the decoder. In other words, the prediction horizon of the model.
+        context_length (`int`, *optional*, defaults to `prediction_length`):
+            The context length for the encoder. If unset, the context length will be the same as the
+            `prediction_length`.
+        distribution_output (`string`, *optional*, defaults to `"student_t"`):
+            The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
+        loss (`string`, *optional*, defaults to `"nll"`):
+            The loss function for the model corresponding to the `distribution_output` head. For parametric
+            distributions it is the negative log likelihood (nll) - which currently is the only supported one.
+        input_size (`int`, *optional*, defaults to 1):
+            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
+            multivariate targets.
+        lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
+            The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4,
+            5, 6, 7]`.
+        scaling (`bool`, *optional* defaults to `True`):
+            Whether to scale the input targets.
+        num_time_features (`int`, *optional*, defaults to 0):
+            The number of time features in the input time series.
+        num_dynamic_real_features (`int`, *optional*, defaults to 0):
+            The number of dynamic real valued features.
+        num_static_categorical_features (`int`, *optional*, defaults to 0):
+            The number of static categorical features.
+        num_static_real_features (`int`, *optional*, defaults to 0):
+            The number of static real valued features.
+        cardinality (`list[int]`, *optional*):
+            The cardinality (number of different values) for each of the static categorical features. Should be a list
+            of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
+            `num_static_categorical_features` is > 0.
+        embedding_dimension (`list[int]`, *optional*):
+            The dimension of the embedding for each of the static categorical features. Should be a list of integers,
+            having the same length as `num_static_categorical_features`. Cannot be `None` if
+            `num_static_categorical_features` is > 0.
+        d_model (`int`, *optional*, defaults to 64):
+            Dimensionality of the transformer layers.
+        encoder_layers (`int`, *optional*, defaults to 2):
+            Number of encoder layers.
+        decoder_layers (`int`, *optional*, defaults to 2):
+            Number of decoder layers.
+        encoder_attention_heads (`int`, *optional*, defaults to 2):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        decoder_attention_heads (`int`, *optional*, defaults to 2):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        encoder_ffn_dim (`int`, *optional*, defaults to 32):
+            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
+        decoder_ffn_dim (`int`, *optional*, defaults to 32):
+            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
+        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
+            `"relu"` are supported.
+        dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the encoder, and decoder.
+        encoder_layerdrop (`float`, *optional*, defaults to 0.1):
+            The dropout probability for the attention and fully connected layers for each encoder layer.
+        decoder_layerdrop (`float`, *optional*, defaults to 0.1):
+            The dropout probability for the attention and fully connected layers for each decoder layer.
+        attention_dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability for the attention probabilities.
+        activation_dropout (`float`, *optional*, defaults to 0.1):
+            The dropout probability used between the two layers of the feed-forward networks.
+        num_parallel_samples (`int`, *optional*, defaults to 100):
+            The number of samples to generate in parallel for each time step of inference.
+        init_std (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated normal weight initialization distribution.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
+        label_length (`int`, *optional*, defaults to 10):
+            Start token length of the Autoformer decoder, which is used for direct multi-step prediction (i.e.
+            non-autoregressive generation).
+        moving_average (`int`, *optional*, defaults to 25):
+            The window size of the moving average. In practice, it's the kernel size in AvgPool1d of the Decomposition
+            Layer.
+        autocorrelation_factor (`int`, *optional*, defaults to 3):
+            "Attention" (i.e. AutoCorrelation mechanism) factor which is used to find top k autocorrelations delays.
+            It's recommended in the paper to set it to a number between 1 and 5.
+
+
+    Example:
+
+    ```python
+    >>> from transformers import AutoformerConfig, AutoformerModel
+
+    >>> # Initializing a default Autoformer configuration
+    >>> configuration = AutoformerConfig()
+
+    >>> # Randomly initializing a model (with random weights) from the configuration
+    >>> model = AutoformerModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "autoformer"
+    attribute_map = {
+        "hidden_size": "d_model",
+        "num_attention_heads": "encoder_attention_heads",
+        "num_hidden_layers": "encoder_layers",
+    }
+
+    def __init__(
+        self,
+        prediction_length: Optional[int] = None,
+        context_length: Optional[int] = None,
+        distribution_output: str = "student_t",
+        loss: str = "nll",
+        input_size: int = 1,
+        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
+        scaling: bool = True,
+        num_time_features: int = 0,
+        num_dynamic_real_features: int = 0,
+        num_static_categorical_features: int = 0,
+        num_static_real_features: int = 0,
+        cardinality: Optional[List[int]] = None,
+        embedding_dimension: Optional[List[int]] = None,
+        d_model: int = 64,
+        encoder_attention_heads: int = 2,
+        decoder_attention_heads: int = 2,
+        encoder_layers: int = 2,
+        decoder_layers: int = 2,
+        encoder_ffn_dim: int = 32,
+        decoder_ffn_dim: int = 32,
+        activation_function: str = "gelu",
+        dropout: float = 0.1,
+        encoder_layerdrop: float = 0.1,
+        decoder_layerdrop: float = 0.1,
+        attention_dropout: float = 0.1,
+        activation_dropout: float = 0.1,
+        num_parallel_samples: int = 100,
+        init_std: float = 0.02,
+        use_cache: bool = True,
+        is_encoder_decoder=True,
+        # Autoformer arguments
+        label_length: int = 10,
+        moving_average: int = 25,
+        autocorrelation_factor: int = 3,
+        **kwargs,
+    ):
+        # time series specific configuration
+        self.prediction_length = prediction_length
+        self.context_length = context_length if context_length is not None else prediction_length
+        self.distribution_output = distribution_output
+        self.loss = loss
+        self.input_size = input_size
+        self.num_time_features = num_time_features
+        self.lags_sequence = lags_sequence
+        self.scaling = scaling
+        self.num_dynamic_real_features = num_dynamic_real_features
+        self.num_static_real_features = num_static_real_features
+        self.num_static_categorical_features = num_static_categorical_features
+        if cardinality is not None and num_static_categorical_features > 0:
+            if len(cardinality) != num_static_categorical_features:
+                raise ValueError(
+                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
+                )
+            self.cardinality = cardinality
+        else:
+            self.cardinality = [0]
+        if embedding_dimension is not None and num_static_categorical_features > 0:
+            if len(embedding_dimension) != num_static_categorical_features:
+                raise ValueError(
+                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
+                )
+            self.embedding_dimension = embedding_dimension
+        else:
+            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
+        self.num_parallel_samples = num_parallel_samples
+
+        # Transformer architecture configuration
+        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
+        self.d_model = d_model
+        self.encoder_attention_heads = encoder_attention_heads
+        self.decoder_attention_heads = decoder_attention_heads
+        self.encoder_ffn_dim = encoder_ffn_dim
+        self.decoder_ffn_dim = decoder_ffn_dim
+        self.encoder_layers = encoder_layers
+        self.decoder_layers = decoder_layers
+
+        self.dropout = dropout
+        self.attention_dropout = attention_dropout
+        self.activation_dropout = activation_dropout
+        self.encoder_layerdrop = encoder_layerdrop
+        self.decoder_layerdrop = decoder_layerdrop
+
+        self.activation_function = activation_function
+        self.init_std = init_std
+
+        self.use_cache = use_cache
+
+        # Autoformer
+        self.label_length = label_length
+        self.moving_average = moving_average
+        self.autocorrelation_factor = autocorrelation_factor
+
+        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
+
+    @property
+    def _number_of_features(self) -> int:
+        return (
+            sum(self.embedding_dimension)
+            + self.num_dynamic_real_features
+            + self.num_time_features
+            + self.num_static_real_features
+            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
+        )
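
One non-obvious part of this configuration, for readers of the diff: `feature_size` is not a constructor argument, it is derived in `__init__` from `lags_sequence` and the `_number_of_features` property. A minimal sketch with illustrative values (none of them come from this commit):

```python
# Sketch only, not part of this diff: how AutoformerConfig derives feature_size.
from transformers import AutoformerConfig

config = AutoformerConfig(
    prediction_length=24,
    context_length=48,
    input_size=1,
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[366],           # one categorical feature with 366 possible values
    embedding_dimension=[4],     # embedded into 4 dimensions
)

# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (4 + 0 + 2 + 0 + 1 * 2) = 15
print(config.feature_size)  # 15
```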
janus/lib/python3.10/site-packages/transformers/models/autoformer/modeling_autoformer.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (558 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/configuration_bros.cpython-310.pyc ADDED
Binary file (5.47 kB).
 
janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/modeling_bros.cpython-310.pyc ADDED
Binary file (36.7 kB).
 
janus/lib/python3.10/site-packages/transformers/models/bros/configuration_bros.py ADDED
@@ -0,0 +1,138 @@
+# coding=utf-8
+# Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Bros model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class BrosConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
+    instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the Bros
+    [jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention probabilities.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 2):
+            The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            The index of the padding token in the token vocabulary.
+        dim_bbox (`int`, *optional*, defaults to 8):
+            The dimension of the bounding box coordinates. (x0, y1, x1, y0, x1, y1, x0, y1)
+        bbox_scale (`float`, *optional*, defaults to 100.0):
+            The scale factor of the bounding box coordinates.
+        n_relations (`int`, *optional*, defaults to 1):
+            The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
+        classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the classifier head.
+
+
+    Examples:
+
+    ```python
+    >>> from transformers import BrosConfig, BrosModel
+
+    >>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
+    >>> configuration = BrosConfig()
+
+    >>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
+    >>> model = BrosModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "bros"
+
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=0,
+        dim_bbox=8,
+        bbox_scale=100.0,
+        n_relations=1,
+        classifier_dropout_prob=0.1,
+        **kwargs,
+    ):
+        super().__init__(
+            vocab_size=vocab_size,
+            hidden_size=hidden_size,
+            num_hidden_layers=num_hidden_layers,
+            num_attention_heads=num_attention_heads,
+            intermediate_size=intermediate_size,
+            hidden_act=hidden_act,
+            hidden_dropout_prob=hidden_dropout_prob,
+            attention_probs_dropout_prob=attention_probs_dropout_prob,
+            max_position_embeddings=max_position_embeddings,
+            type_vocab_size=type_vocab_size,
+            initializer_range=initializer_range,
+            layer_norm_eps=layer_norm_eps,
+            pad_token_id=pad_token_id,
+            **kwargs,
+        )
+
+        self.dim_bbox = dim_bbox
+        self.bbox_scale = bbox_scale
+        self.n_relations = n_relations
+        self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
+        self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
+        self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
+        self.classifier_dropout_prob = classifier_dropout_prob
+
+
+__all__ = ["BrosConfig"]
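
Note for readers of the diff: the bounding-box embedding sizes are not constructor arguments; `BrosConfig.__init__` derives them from `hidden_size`, `dim_bbox`, and `num_attention_heads`. A minimal sketch using the defaults (values shown are just the arithmetic above, not anything added by this commit):

```python
# Sketch only, not part of this diff: the bbox-related dimensions BrosConfig derives.
from transformers import BrosConfig

config = BrosConfig()  # defaults: hidden_size=768, num_attention_heads=12, dim_bbox=8
print(config.dim_bbox_sinusoid_emb_2d)  # 192  (hidden_size // 4)
print(config.dim_bbox_sinusoid_emb_1d)  # 24   (dim_bbox_sinusoid_emb_2d // dim_bbox)
print(config.dim_bbox_projection)       # 64   (hidden_size // num_attention_heads)
```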
janus/lib/python3.10/site-packages/transformers/models/bros/modeling_bros.py ADDED
@@ -0,0 +1,1323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023-present NAVER Corp, The Microsoft Research Asia LayoutLM Team Authors and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch Bros model."""
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import CrossEntropyLoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_outputs import (
28
+ BaseModelOutputWithPastAndCrossAttentions,
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ TokenClassifierOutput,
31
+ )
32
+ from ...modeling_utils import PreTrainedModel
33
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
34
+ from ...utils import (
35
+ ModelOutput,
36
+ add_start_docstrings,
37
+ add_start_docstrings_to_model_forward,
38
+ logging,
39
+ replace_return_docstrings,
40
+ )
41
+ from .configuration_bros import BrosConfig
42
+
43
+
44
+ logger = logging.get_logger(__name__)
45
+
46
+ _CHECKPOINT_FOR_DOC = "jinho8345/bros-base-uncased"
47
+ _CONFIG_FOR_DOC = "BrosConfig"
48
+
49
+
50
+ BROS_START_DOCSTRING = r"""
51
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
52
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
53
+ and behavior.
54
+
55
+ Parameters:
56
+ config ([`BrosConfig`]): Model configuration class with all the parameters of the model.
57
+ Initializing with a config file does not load the weights associated with the model, only the
58
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
59
+ """
60
+
61
+ BROS_INPUTS_DOCSTRING = r"""
62
+ Args:
63
+ input_ids (`torch.LongTensor` of shape `({0})`):
64
+ Indices of input sequence tokens in the vocabulary.
65
+
66
+ Indices can be obtained using [`BrosProcessor`]. See [`PreTrainedTokenizer.encode`] and
67
+ [`PreTrainedTokenizer.__call__`] for details.
68
+
69
+ [What are input IDs?](../glossary#input-ids)
70
+
71
+ bbox ('torch.FloatTensor' of shape '(batch_size, num_boxes, 4)'):
72
+ Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
73
+ (x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
74
+ bounding box.
75
+
76
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
77
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
78
+
79
+ - 1 for tokens that are **not masked**,
80
+ - 0 for tokens that are **masked**.
81
+
82
+ [What are attention masks?](../glossary#attention-mask)
83
+
84
+ bbox_first_token_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
85
+ Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
86
+
87
+ - 1 for tokens that are **not masked**,
88
+ - 0 for tokens that are **masked**.
89
+
90
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
91
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
92
+ 1]`:
93
+
94
+ - 0 corresponds to a *sentence A* token,
95
+ - 1 corresponds to a *sentence B* token.
96
+
97
+ [What are token type IDs?](../glossary#token-type-ids)
98
+
99
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
100
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
101
+ config.max_position_embeddings - 1]`.
102
+
103
+ [What are position IDs?](../glossary#position-ids)
104
+
105
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
106
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
107
+
108
+ - 1 indicates the head is **not masked**,
109
+ - 0 indicates the head is **masked**.
110
+
111
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
112
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
113
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
114
+ model's internal embedding lookup matrix.
115
+
116
+ output_attentions (`bool`, *optional*):
117
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
118
+ tensors for more detail.
119
+
120
+ output_hidden_states (`bool`, *optional*):
121
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
122
+ more detail.
123
+
124
+ return_dict (`bool`, *optional*):
125
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
126
+ """
127
+
128
+
129
+ @dataclass
130
+ class BrosSpadeOutput(ModelOutput):
131
+ """
132
+ Base class for outputs of token classification models.
133
+
134
+ Args:
135
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :
136
+ Classification loss.
137
+ initial_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
138
+ Classification scores for entity initial tokens (before SoftMax).
139
+ subsequent_token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length+1)`):
140
+ Classification scores for entity sequence tokens (before SoftMax).
141
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
142
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
143
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
144
+
145
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
146
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
147
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
148
+ sequence_length)`.
149
+
150
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
151
+ heads.
152
+ """
153
+
154
+ loss: Optional[torch.FloatTensor] = None
155
+ initial_token_logits: torch.FloatTensor = None
156
+ subsequent_token_logits: torch.FloatTensor = None
157
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
158
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
159
+
160
+
161
+ class BrosPositionalEmbedding1D(nn.Module):
162
+ # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15
163
+
164
+ def __init__(self, config):
165
+ super(BrosPositionalEmbedding1D, self).__init__()
166
+
167
+ self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d
168
+
169
+ inv_freq = 1 / (
170
+ 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
171
+ )
172
+ self.register_buffer("inv_freq", inv_freq)
173
+
174
+ def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
175
+ seq_size = pos_seq.size()
176
+ b1, b2, b3 = seq_size
177
+ sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
178
+ pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
179
+ return pos_emb
180
+
181
+
182
+ class BrosPositionalEmbedding2D(nn.Module):
183
+ def __init__(self, config):
184
+ super(BrosPositionalEmbedding2D, self).__init__()
185
+
186
+ self.dim_bbox = config.dim_bbox
187
+ self.x_pos_emb = BrosPositionalEmbedding1D(config)
188
+ self.y_pos_emb = BrosPositionalEmbedding1D(config)
189
+
190
+ def forward(self, bbox: torch.Tensor) -> torch.Tensor:
191
+ stack = []
192
+ for i in range(self.dim_bbox):
193
+ if i % 2 == 0:
194
+ stack.append(self.x_pos_emb(bbox[..., i]))
195
+ else:
196
+ stack.append(self.y_pos_emb(bbox[..., i]))
197
+ bbox_pos_emb = torch.cat(stack, dim=-1)
198
+ return bbox_pos_emb
199
+
200
+
201
+ class BrosBboxEmbeddings(nn.Module):
202
+ def __init__(self, config):
203
+ super(BrosBboxEmbeddings, self).__init__()
204
+ self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
205
+ self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
206
+
207
+ def forward(self, bbox: torch.Tensor):
208
+ bbox_t = bbox.transpose(0, 1)
209
+ bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
210
+ bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
211
+ bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
212
+
213
+ return bbox_pos_emb
214
+
215
+
216
+ class BrosTextEmbeddings(nn.Module):
217
+ """Construct the embeddings from word, position and token_type embeddings."""
218
+
219
+ def __init__(self, config):
220
+ super().__init__()
221
+
222
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
223
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
224
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
225
+
226
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
227
+ # any TensorFlow checkpoint file
228
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
229
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
230
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
231
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
232
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
233
+ self.register_buffer(
234
+ "token_type_ids",
235
+ torch.zeros(
236
+ self.position_ids.size(),
237
+ dtype=torch.long,
238
+ device=self.position_ids.device,
239
+ ),
240
+ persistent=False,
241
+ )
242
+
243
+ def forward(
244
+ self,
245
+ input_ids: Optional[torch.Tensor] = None,
246
+ token_type_ids: Optional[torch.Tensor] = None,
247
+ position_ids: Optional[torch.Tensor] = None,
248
+ inputs_embeds: Optional[torch.Tensor] = None,
249
+ past_key_values_length: int = 0,
250
+ ) -> torch.Tensor:
251
+ if input_ids is not None:
252
+ input_shape = input_ids.size()
253
+ else:
254
+ input_shape = inputs_embeds.size()[:-1]
255
+
256
+ seq_length = input_shape[1]
257
+
258
+ if position_ids is None:
259
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
260
+
261
+ if token_type_ids is None:
262
+ if hasattr(self, "token_type_ids"):
263
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
264
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
265
+ token_type_ids = buffered_token_type_ids_expanded
266
+ else:
267
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
268
+
269
+ if inputs_embeds is None:
270
+ inputs_embeds = self.word_embeddings(input_ids)
271
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
272
+
273
+ embeddings = inputs_embeds + token_type_embeddings
274
+ if self.position_embedding_type == "absolute":
275
+ position_embeddings = self.position_embeddings(position_ids)
276
+ embeddings += position_embeddings
277
+ embeddings = self.LayerNorm(embeddings)
278
+ embeddings = self.dropout(embeddings)
279
+ return embeddings
280
+
281
+
282
+ class BrosSelfAttention(nn.Module):
283
+ def __init__(self, config):
284
+ super().__init__()
285
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
286
+ raise ValueError(
287
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
288
+ f"heads ({config.num_attention_heads})"
289
+ )
290
+
291
+ self.num_attention_heads = config.num_attention_heads
292
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
293
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
294
+
295
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
296
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
297
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
298
+
299
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
300
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
301
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
302
+ self.max_position_embeddings = config.max_position_embeddings
303
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
304
+
305
+ self.is_decoder = config.is_decoder
306
+
307
+ def transpose_for_scores(self, x: torch.Tensor):
308
+ new_x_shape = x.size()[:-1] + (
309
+ self.num_attention_heads,
310
+ self.attention_head_size,
311
+ )
312
+ x = x.view(*new_x_shape)
313
+ return x.permute(0, 2, 1, 3)
314
+
315
+ def forward(
316
+ self,
317
+ hidden_states: torch.Tensor,
318
+ bbox_pos_emb: torch.Tensor,
319
+ attention_mask: Optional[torch.Tensor] = None,
320
+ head_mask: Optional[torch.Tensor] = None,
321
+ encoder_hidden_states: Optional[torch.Tensor] = None,
322
+ encoder_attention_mask: Optional[torch.Tensor] = None,
323
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
324
+ output_attentions: Optional[torch.Tensor] = False,
325
+ ) -> Tuple[torch.Tensor]:
326
+ mixed_query_layer = self.query(hidden_states)
327
+
328
+ # If this is instantiated as a cross-attention module, the keys
329
+ # and values come from an encoder; the attention mask needs to be
330
+ # such that the encoder's padding tokens are not attended to.
331
+ is_cross_attention = encoder_hidden_states is not None
332
+
333
+ if is_cross_attention and past_key_value is not None:
334
+ # reuse k,v, cross_attentions
335
+ key_layer = past_key_value[0]
336
+ value_layer = past_key_value[1]
337
+ attention_mask = encoder_attention_mask
338
+ elif is_cross_attention:
339
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
340
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
341
+ attention_mask = encoder_attention_mask
342
+ elif past_key_value is not None:
343
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
344
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
345
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
346
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
347
+ else:
348
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
349
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
350
+
351
+ query_layer = self.transpose_for_scores(mixed_query_layer)
352
+
353
+ if self.is_decoder:
354
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
355
+ # Further calls to cross_attention layer can then reuse all cross-attention
356
+ # key/value_states (first "if" case)
357
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
358
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
359
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
360
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
361
+ past_key_value = (key_layer, value_layer)
362
+
363
+ # Take the dot product between "query" and "key" to get the raw attention scores.
364
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
365
+
366
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
367
+ seq_length = hidden_states.size()[1]
368
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
369
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
370
+ distance = position_ids_l - position_ids_r
371
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
372
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
373
+
374
+ if self.position_embedding_type == "relative_key":
375
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
376
+ attention_scores = attention_scores + relative_position_scores
377
+ elif self.position_embedding_type == "relative_key_query":
378
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
379
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
380
+
381
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
382
+
383
+ # bbox positional encoding
384
+ batch_size, n_head, seq_length, d_head = query_layer.shape
385
+ bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
386
+ bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
387
+ bbox_pos_scores = torch.einsum("bnid,bijd->bnij", (query_layer, bbox_pos_emb))
388
+
389
+ attention_scores = attention_scores + bbox_pos_scores
390
+
391
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
392
+ if attention_mask is not None:
393
+ # Apply the attention mask is (precomputed for all layers in BrosModel forward() function)
394
+ attention_scores = attention_scores + attention_mask
395
+
396
+ # Normalize the attention scores to probabilities.
397
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
398
+
399
+ # This is actually dropping out entire tokens to attend to, which might
400
+ # seem a bit unusual, but is taken from the original Transformer paper.
401
+ attention_probs = self.dropout(attention_probs)
402
+
403
+ # Mask heads if we want to
404
+ if head_mask is not None:
405
+ attention_probs = attention_probs * head_mask
406
+
407
+ context_layer = torch.matmul(attention_probs, value_layer)
408
+
409
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
410
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
411
+ context_layer = context_layer.view(*new_context_layer_shape)
412
+
413
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
414
+
415
+ if self.is_decoder:
416
+ outputs = outputs + (past_key_value,)
417
+ return outputs
418
+
419
+
420
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Bros
421
+ class BrosSelfOutput(nn.Module):
422
+ def __init__(self, config):
423
+ super().__init__()
424
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
425
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
426
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
427
+
428
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
429
+ hidden_states = self.dense(hidden_states)
430
+ hidden_states = self.dropout(hidden_states)
431
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
432
+ return hidden_states
433
+
434
+
435
+ class BrosAttention(nn.Module):
436
+ def __init__(self, config):
437
+ super().__init__()
438
+ self.self = BrosSelfAttention(config)
439
+ self.output = BrosSelfOutput(config)
440
+ self.pruned_heads = set()
441
+
442
+ def prune_heads(self, heads):
443
+ if len(heads) == 0:
444
+ return
445
+ heads, index = find_pruneable_heads_and_indices(
446
+ heads,
447
+ self.self.num_attention_heads,
448
+ self.self.attention_head_size,
449
+ self.pruned_heads,
450
+ )
451
+
452
+ # Prune linear layers
453
+ self.self.query = prune_linear_layer(self.self.query, index)
454
+ self.self.key = prune_linear_layer(self.self.key, index)
455
+ self.self.value = prune_linear_layer(self.self.value, index)
456
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
457
+
458
+ # Update hyper params and store pruned heads
459
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
460
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
461
+ self.pruned_heads = self.pruned_heads.union(heads)
462
+
463
+ def forward(
464
+ self,
465
+ hidden_states: torch.Tensor,
466
+ bbox_pos_emb: torch.Tensor,
467
+ attention_mask: Optional[torch.Tensor] = None,
468
+ head_mask: Optional[torch.Tensor] = None,
469
+ encoder_hidden_states: Optional[torch.Tensor] = None,
470
+ encoder_attention_mask: Optional[torch.Tensor] = None,
471
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
472
+ output_attentions: Optional[bool] = False,
473
+ ) -> Tuple[torch.Tensor]:
474
+ self_outputs = self.self(
475
+ hidden_states=hidden_states,
476
+ bbox_pos_emb=bbox_pos_emb,
477
+ attention_mask=attention_mask,
478
+ head_mask=head_mask,
479
+ encoder_hidden_states=encoder_hidden_states,
480
+ encoder_attention_mask=encoder_attention_mask,
481
+ past_key_value=past_key_value,
482
+ output_attentions=output_attentions,
483
+ )
484
+ attention_output = self.output(self_outputs[0], hidden_states)
485
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
486
+ return outputs
487
+
488
+
489
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Bros
490
+ class BrosIntermediate(nn.Module):
491
+ def __init__(self, config):
492
+ super().__init__()
493
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
494
+ if isinstance(config.hidden_act, str):
495
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
496
+ else:
497
+ self.intermediate_act_fn = config.hidden_act
498
+
499
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
500
+ hidden_states = self.dense(hidden_states)
501
+ hidden_states = self.intermediate_act_fn(hidden_states)
502
+ return hidden_states
503
+
504
+
505
+ class BrosOutput(nn.Module):
506
+ def __init__(self, config):
507
+ super().__init__()
508
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
509
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
510
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
511
+
512
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
513
+ hidden_states = self.dense(hidden_states)
514
+ hidden_states = self.dropout(hidden_states)
515
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
516
+ return hidden_states
517
+
518
+
519
+ class BrosLayer(nn.Module):
520
+ def __init__(self, config):
521
+ super().__init__()
522
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
523
+ self.seq_len_dim = 1
524
+ self.attention = BrosAttention(config)
525
+ self.is_decoder = config.is_decoder
526
+ self.add_cross_attention = config.add_cross_attention
527
+ if self.add_cross_attention:
528
+ if not self.is_decoder:
529
+ raise Exception(f"{self} should be used as a decoder model if cross attention is added")
530
+ self.crossattention = BrosAttention(config)
531
+ self.intermediate = BrosIntermediate(config)
532
+ self.output = BrosOutput(config)
533
+
534
+ def forward(
535
+ self,
536
+ hidden_states: torch.Tensor,
537
+ bbox_pos_emb: torch.Tensor,
538
+ attention_mask: Optional[torch.FloatTensor] = None,
539
+ head_mask: Optional[torch.FloatTensor] = None,
540
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
541
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
542
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
543
+ output_attentions: Optional[bool] = False,
544
+ ) -> Tuple[torch.Tensor]:
545
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
546
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
547
+ self_attention_outputs = self.attention(
548
+ hidden_states,
549
+ bbox_pos_emb=bbox_pos_emb,
550
+ attention_mask=attention_mask,
551
+ head_mask=head_mask,
552
+ output_attentions=output_attentions,
553
+ past_key_value=self_attn_past_key_value,
554
+ )
555
+ attention_output = self_attention_outputs[0]
556
+
557
+ # if decoder, the last output is tuple of self-attn cache
558
+ if self.is_decoder:
559
+ outputs = self_attention_outputs[1:-1]
560
+ present_key_value = self_attention_outputs[-1]
561
+ else:
562
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
563
+
564
+ cross_attn_present_key_value = None
565
+ if self.is_decoder and encoder_hidden_states is not None:
566
+ if not hasattr(self, "crossattention"):
567
+ raise Exception(
568
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
569
+ )
570
+
571
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
572
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
573
+ cross_attention_outputs = self.crossattention(
574
+ attention_output,
575
+ attention_mask,
576
+ head_mask,
577
+ encoder_hidden_states,
578
+ encoder_attention_mask,
579
+ cross_attn_past_key_value,
580
+ output_attentions,
581
+ )
582
+ attention_output = cross_attention_outputs[0]
583
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
584
+
585
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
586
+ cross_attn_present_key_value = cross_attention_outputs[-1]
587
+ present_key_value = present_key_value + cross_attn_present_key_value
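+ # Cache layout note (added for clarity; follows the positional comments above, not part of the original file):
+ # each layer's `past_key_value` is assumed to be a 4-tuple
+ # (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value), so `[:2]` feeds self-attention,
+ # `[-2:]` feeds cross-attention, and `present_key_value` is rebuilt in the same order.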
588
+
589
+ layer_output = apply_chunking_to_forward(
590
+ self.feed_forward_chunk,
591
+ self.chunk_size_feed_forward,
592
+ self.seq_len_dim,
593
+ attention_output,
594
+ )
595
+ outputs = (layer_output,) + outputs
596
+
597
+ # if decoder, return the attn key/values as the last output
598
+ if self.is_decoder:
599
+ outputs = outputs + (present_key_value,)
600
+
601
+ return outputs
602
+
603
+ def feed_forward_chunk(self, attention_output):
604
+ intermediate_output = self.intermediate(attention_output)
605
+ layer_output = self.output(intermediate_output, attention_output)
606
+ return layer_output
607
+
608
+
609
+ class BrosEncoder(nn.Module):
610
+ def __init__(self, config):
611
+ super().__init__()
612
+ self.config = config
613
+ self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
614
+
615
+ def forward(
616
+ self,
617
+ hidden_states: torch.Tensor,
618
+ bbox_pos_emb: torch.Tensor,
619
+ attention_mask: Optional[torch.FloatTensor] = None,
620
+ head_mask: Optional[torch.FloatTensor] = None,
621
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
622
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
623
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
624
+ use_cache: Optional[bool] = None,
625
+ output_attentions: Optional[bool] = False,
626
+ output_hidden_states: Optional[bool] = False,
627
+ return_dict: Optional[bool] = True,
628
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
629
+ all_hidden_states = () if output_hidden_states else None
630
+ all_self_attentions = () if output_attentions else None
631
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
632
+
633
+ next_decoder_cache = () if use_cache else None
634
+ for i, layer_module in enumerate(self.layer):
635
+ if output_hidden_states:
636
+ all_hidden_states = all_hidden_states + (hidden_states,)
637
+
638
+ layer_head_mask = head_mask[i] if head_mask is not None else None
639
+ past_key_value = past_key_values[i] if past_key_values is not None else None
640
+
641
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
642
+ if use_cache:
643
+ logger.warning(
644
+ "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
645
+ "`use_cache=False`..."
646
+ )
647
+ use_cache = False
648
+ layer_outputs = self._gradient_checkpointing_func(
649
+ layer_module.__call__,
650
+ hidden_states,
651
+ bbox_pos_emb,
652
+ attention_mask,
653
+ layer_head_mask,
654
+ encoder_hidden_states,
655
+ encoder_attention_mask,
656
+ output_attentions,
657
+ )
658
+ else:
659
+ layer_outputs = layer_module(
660
+ hidden_states=hidden_states,
661
+ bbox_pos_emb=bbox_pos_emb,
662
+ attention_mask=attention_mask,
663
+ head_mask=layer_head_mask,
664
+ encoder_hidden_states=encoder_hidden_states,
665
+ encoder_attention_mask=encoder_attention_mask,
666
+ past_key_value=past_key_value,
667
+ output_attentions=output_attentions,
668
+ )
669
+
670
+ hidden_states = layer_outputs[0]
671
+ if use_cache:
672
+ next_decoder_cache += (layer_outputs[-1],)
673
+ if output_attentions:
674
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
675
+ if self.config.add_cross_attention:
676
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
677
+
678
+ if output_hidden_states:
679
+ all_hidden_states = all_hidden_states + (hidden_states,)
680
+
681
+ if not return_dict:
682
+ return tuple(
683
+ v
684
+ for v in [
685
+ hidden_states,
686
+ next_decoder_cache,
687
+ all_hidden_states,
688
+ all_self_attentions,
689
+ all_cross_attentions,
690
+ ]
691
+ if v is not None
692
+ )
693
+ return BaseModelOutputWithPastAndCrossAttentions(
694
+ last_hidden_state=hidden_states,
695
+ past_key_values=next_decoder_cache,
696
+ hidden_states=all_hidden_states,
697
+ attentions=all_self_attentions,
698
+ cross_attentions=all_cross_attentions,
699
+ )
700
+
701
+
702
+ # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Bros
703
+ class BrosPooler(nn.Module):
704
+ def __init__(self, config):
705
+ super().__init__()
706
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
707
+ self.activation = nn.Tanh()
708
+
709
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
710
+ # We "pool" the model by simply taking the hidden state corresponding
711
+ # to the first token.
712
+ first_token_tensor = hidden_states[:, 0]
713
+ pooled_output = self.dense(first_token_tensor)
714
+ pooled_output = self.activation(pooled_output)
715
+ return pooled_output
716
+
717
+
718
+ class BrosRelationExtractor(nn.Module):
719
+ def __init__(self, config):
720
+ super().__init__()
721
+ self.n_relations = config.n_relations
722
+ self.backbone_hidden_size = config.hidden_size
723
+ self.head_hidden_size = config.hidden_size
724
+ self.classifier_dropout_prob = config.classifier_dropout_prob
725
+
726
+ self.drop = nn.Dropout(self.classifier_dropout_prob)
727
+ self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
728
+
729
+ self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
730
+
731
+ self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
732
+
733
+ def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
734
+ query_layer = self.query(self.drop(query_layer))
735
+
736
+ dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
737
+ key_layer = torch.cat([key_layer, dummy_vec], axis=0)
738
+ key_layer = self.key(self.drop(key_layer))
739
+
740
+ query_layer = query_layer.view(
741
+ query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size
742
+ )
743
+ key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
744
+
745
+ relation_score = torch.matmul(
746
+ query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0)
747
+ ) # equivalent to torch.einsum("ibnd,jbnd->nbij", (query_layer, key_layer))
748
+
749
+ return relation_score
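+ # Shape sketch (added for clarity; inferred from the code above, not part of the original file):
+ # with query/key inputs of shape (seq_len, batch, hidden_size), the projections followed by `.view(...)`
+ # give (seq_len, batch, n_relations, head_hidden_size) tensors (the key side has seq_len + 1 rows because
+ # of the appended dummy node), and the permuted matmul returns `relation_score` of shape
+ # (n_relations, batch, seq_len, seq_len + 1).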
750
+
751
+
752
+ class BrosPreTrainedModel(PreTrainedModel):
753
+ """
754
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
755
+ models.
756
+ """
757
+
758
+ config_class = BrosConfig
759
+ base_model_prefix = "bros"
760
+
761
+ def _init_weights(self, module):
762
+ """Initialize the weights"""
763
+ if isinstance(module, nn.Linear):
764
+ # Slightly different from the TF version which uses truncated_normal for initialization
765
+ # cf https://github.com/pytorch/pytorch/pull/5617
766
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
767
+ if module.bias is not None:
768
+ module.bias.data.zero_()
769
+ elif isinstance(module, nn.Embedding):
770
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
771
+ if module.padding_idx is not None:
772
+ module.weight.data[module.padding_idx].zero_()
773
+ elif isinstance(module, nn.LayerNorm):
774
+ module.bias.data.zero_()
775
+ module.weight.data.fill_(1.0)
776
+
777
+
778
+ @add_start_docstrings(
779
+ "The bare Bros Model transformer outputting raw hidden-states without any specific head on top.",
780
+ BROS_START_DOCSTRING,
781
+ )
782
+ class BrosModel(BrosPreTrainedModel):
783
+ def __init__(self, config, add_pooling_layer=True):
784
+ super().__init__(config)
785
+ self.config = config
786
+
787
+ self.embeddings = BrosTextEmbeddings(config)
788
+ self.bbox_embeddings = BrosBboxEmbeddings(config)
789
+ self.encoder = BrosEncoder(config)
790
+
791
+ self.pooler = BrosPooler(config) if add_pooling_layer else None
792
+
793
+ self.init_weights()
794
+
795
+ def get_input_embeddings(self):
796
+ return self.embeddings.word_embeddings
797
+
798
+ def set_input_embeddings(self, value):
799
+ self.embeddings.word_embeddings = value
800
+
801
+ def _prune_heads(self, heads_to_prune):
802
+ """
803
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
804
+ class PreTrainedModel
805
+ """
806
+ for layer, heads in heads_to_prune.items():
807
+ self.encoder.layer[layer].attention.prune_heads(heads)
808
+
809
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
810
+ @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
811
+ def forward(
812
+ self,
813
+ input_ids: Optional[torch.Tensor] = None,
814
+ bbox: Optional[torch.Tensor] = None,
815
+ attention_mask: Optional[torch.Tensor] = None,
816
+ token_type_ids: Optional[torch.Tensor] = None,
817
+ position_ids: Optional[torch.Tensor] = None,
818
+ head_mask: Optional[torch.Tensor] = None,
819
+ inputs_embeds: Optional[torch.Tensor] = None,
820
+ encoder_hidden_states: Optional[torch.Tensor] = None,
821
+ encoder_attention_mask: Optional[torch.Tensor] = None,
822
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
823
+ use_cache: Optional[bool] = None,
824
+ output_attentions: Optional[bool] = None,
825
+ output_hidden_states: Optional[bool] = None,
826
+ return_dict: Optional[bool] = None,
827
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
828
+ r"""
829
+ Returns:
830
+
831
+ Examples:
832
+
833
+ ```python
834
+ >>> import torch
835
+ >>> from transformers import BrosProcessor, BrosModel
836
+
837
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
838
+
839
+ >>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
840
+
841
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
842
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
843
+ >>> encoding["bbox"] = bbox
844
+
845
+ >>> outputs = model(**encoding)
846
+ >>> last_hidden_states = outputs.last_hidden_state
847
+ ```"""
848
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
849
+ output_hidden_states = (
850
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
851
+ )
852
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
853
+
854
+ if self.config.is_decoder:
855
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
856
+ else:
857
+ use_cache = False
858
+
859
+ if input_ids is not None and inputs_embeds is not None:
860
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
861
+ elif input_ids is not None:
862
+ input_shape = input_ids.size()
863
+ elif inputs_embeds is not None:
864
+ input_shape = inputs_embeds.size()[:-1]
865
+ else:
866
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
867
+
868
+ if bbox is None:
869
+ raise ValueError("You have to specify bbox")
870
+
871
+ batch_size, seq_length = input_shape
872
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
873
+
874
+ # past_key_values_length
875
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
876
+
877
+ if attention_mask is None:
878
+ attention_mask = torch.ones(input_shape, device=device)
879
+
880
+ if token_type_ids is None:
881
+ if hasattr(self.embeddings, "token_type_ids"):
882
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
883
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
884
+ token_type_ids = buffered_token_type_ids_expanded
885
+ else:
886
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
887
+
888
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
889
+ # ourselves in which case we just need to make it broadcastable to all heads.
890
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
891
+
892
+ # If a 2D or 3D attention mask is provided for the cross-attention
893
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
894
+ if self.config.is_decoder and encoder_hidden_states is not None:
895
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
896
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
897
+ if encoder_attention_mask is None:
898
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
899
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
900
+ else:
901
+ encoder_extended_attention_mask = None
902
+
903
+ # Prepare head mask if needed
904
+ # 1.0 in head_mask indicate we keep the head
905
+ # attention_probs has shape bsz x n_heads x N x N
906
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
907
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
908
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
909
+
910
+ embedding_output = self.embeddings(
911
+ input_ids=input_ids,
912
+ position_ids=position_ids,
913
+ token_type_ids=token_type_ids,
914
+ inputs_embeds=inputs_embeds,
915
+ past_key_values_length=past_key_values_length,
916
+ )
917
+
918
+ # if bbox has 2 points (4 coordinates) per token, convert it to 4 points (8 coordinates) per token
919
+ if bbox.shape[-1] == 4:
920
+ bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
921
+ scaled_bbox = bbox * self.config.bbox_scale
922
+ bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
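+ # Corner-expansion note (added for clarity; an interpretation of the index list above): a 2-point box
+ # [x0, y0, x1, y1] is expanded to its four corners (x0, y0), (x1, y0), (x1, y1), (x0, y1) before being
+ # scaled by `config.bbox_scale` and embedded.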
923
+
924
+ encoder_outputs = self.encoder(
925
+ embedding_output,
926
+ bbox_pos_emb=bbox_position_embeddings,
927
+ attention_mask=extended_attention_mask,
928
+ head_mask=head_mask,
929
+ encoder_hidden_states=encoder_hidden_states,
930
+ encoder_attention_mask=encoder_extended_attention_mask,
931
+ past_key_values=past_key_values,
932
+ use_cache=use_cache,
933
+ output_attentions=output_attentions,
934
+ output_hidden_states=output_hidden_states,
935
+ return_dict=return_dict,
936
+ )
937
+ sequence_output = encoder_outputs[0]
938
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
939
+
940
+ if not return_dict:
941
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
942
+
943
+ return BaseModelOutputWithPoolingAndCrossAttentions(
944
+ last_hidden_state=sequence_output,
945
+ pooler_output=pooled_output,
946
+ past_key_values=encoder_outputs.past_key_values,
947
+ hidden_states=encoder_outputs.hidden_states,
948
+ attentions=encoder_outputs.attentions,
949
+ cross_attentions=encoder_outputs.cross_attentions,
950
+ )
951
+
952
+
953
+ @add_start_docstrings(
954
+ """
955
+ Bros Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
956
+ Named-Entity-Recognition (NER) tasks.
957
+ """,
958
+ BROS_START_DOCSTRING,
959
+ )
960
+ class BrosForTokenClassification(BrosPreTrainedModel):
961
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
962
+
963
+ def __init__(self, config):
964
+ super().__init__(config)
965
+ self.num_labels = config.num_labels
966
+
967
+ self.bros = BrosModel(config)
968
+ classifier_dropout = (
969
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
970
+ )
971
+ self.dropout = nn.Dropout(classifier_dropout)
972
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
973
+
974
+ self.init_weights()
975
+
976
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
977
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
978
+ def forward(
979
+ self,
980
+ input_ids: Optional[torch.Tensor] = None,
981
+ bbox: Optional[torch.Tensor] = None,
982
+ attention_mask: Optional[torch.Tensor] = None,
983
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
984
+ token_type_ids: Optional[torch.Tensor] = None,
985
+ position_ids: Optional[torch.Tensor] = None,
986
+ head_mask: Optional[torch.Tensor] = None,
987
+ inputs_embeds: Optional[torch.Tensor] = None,
988
+ labels: Optional[torch.Tensor] = None,
989
+ output_attentions: Optional[bool] = None,
990
+ output_hidden_states: Optional[bool] = None,
991
+ return_dict: Optional[bool] = None,
992
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
993
+ r"""
994
+
995
+ Returns:
996
+
997
+ Examples:
998
+
999
+ ```python
1000
+ >>> import torch
1001
+ >>> from transformers import BrosProcessor, BrosForTokenClassification
1002
+
1003
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1004
+
1005
+ >>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1006
+
1007
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1008
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1009
+ >>> encoding["bbox"] = bbox
1010
+
1011
+ >>> outputs = model(**encoding)
1012
+ ```"""
1013
+
1014
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1015
+
1016
+ outputs = self.bros(
1017
+ input_ids,
1018
+ bbox=bbox,
1019
+ attention_mask=attention_mask,
1020
+ token_type_ids=token_type_ids,
1021
+ position_ids=position_ids,
1022
+ head_mask=head_mask,
1023
+ inputs_embeds=inputs_embeds,
1024
+ output_attentions=output_attentions,
1025
+ output_hidden_states=output_hidden_states,
1026
+ return_dict=return_dict,
1027
+ )
1028
+
1029
+ sequence_output = outputs[0]
1030
+
1031
+ sequence_output = self.dropout(sequence_output)
1032
+ logits = self.classifier(sequence_output)
1033
+
1034
+ loss = None
1035
+ if labels is not None:
1036
+ loss_fct = CrossEntropyLoss()
1037
+ if bbox_first_token_mask is not None:
1038
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
1039
+ loss = loss_fct(
1040
+ logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask]
1041
+ )
1042
+ else:
1043
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
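+ # Loss-masking note (added for clarity; an interpretation, not part of the original file): when
+ # `bbox_first_token_mask` is provided, the cross-entropy only covers the masked positions (e.g. the first
+ # token of each bounding box), so repeated subword tokens of the same box are not counted multiple times.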
1044
+
1045
+ if not return_dict:
1046
+ output = (logits,) + outputs[2:]
1047
+ return ((loss,) + output) if loss is not None else output
1048
+
1049
+ return TokenClassifierOutput(
1050
+ loss=loss,
1051
+ logits=logits,
1052
+ hidden_states=outputs.hidden_states,
1053
+ attentions=outputs.attentions,
1054
+ )
1055
+
1056
+
1057
+ @add_start_docstrings(
1058
+ """
1059
+ Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the
1060
+ hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to
1061
+ predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent
1062
+ tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors
1063
+ since it predicts the next token of an entity from the current token.
1064
+ """,
1065
+ BROS_START_DOCSTRING,
1066
+ )
1067
+ class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
1068
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1069
+
1070
+ def __init__(self, config):
1071
+ super().__init__(config)
1072
+ self.config = config
1073
+ self.num_labels = config.num_labels
1074
+ self.n_relations = config.n_relations
1075
+ self.backbone_hidden_size = config.hidden_size
1076
+
1077
+ self.bros = BrosModel(config)
1078
+ classifier_dropout = (
1079
+ config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob
1080
+ )
1081
+
1082
+ # Initial token classification for Entity Extraction (NER)
1083
+ self.initial_token_classifier = nn.Sequential(
1084
+ nn.Dropout(classifier_dropout),
1085
+ nn.Linear(config.hidden_size, config.hidden_size),
1086
+ nn.Dropout(classifier_dropout),
1087
+ nn.Linear(config.hidden_size, config.num_labels),
1088
+ )
1089
+
1090
+ # Subsequent token classification for Entity Extraction (NER)
1091
+ self.subsequent_token_classifier = BrosRelationExtractor(config)
1092
+
1093
+ self.init_weights()
1094
+
1095
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1096
+ @replace_return_docstrings(output_type=BrosSpadeOutput, config_class=_CONFIG_FOR_DOC)
1097
+ def forward(
1098
+ self,
1099
+ input_ids: Optional[torch.Tensor] = None,
1100
+ bbox: Optional[torch.Tensor] = None,
1101
+ attention_mask: Optional[torch.Tensor] = None,
1102
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1103
+ token_type_ids: Optional[torch.Tensor] = None,
1104
+ position_ids: Optional[torch.Tensor] = None,
1105
+ head_mask: Optional[torch.Tensor] = None,
1106
+ inputs_embeds: Optional[torch.Tensor] = None,
1107
+ initial_token_labels: Optional[torch.Tensor] = None,
1108
+ subsequent_token_labels: Optional[torch.Tensor] = None,
1109
+ output_attentions: Optional[bool] = None,
1110
+ output_hidden_states: Optional[bool] = None,
1111
+ return_dict: Optional[bool] = None,
1112
+ ) -> Union[Tuple[torch.Tensor], BrosSpadeOutput]:
1113
+ r"""
1114
+ Returns:
1115
+
1116
+ Examples:
1117
+
1118
+ ```python
1119
+ >>> import torch
1120
+ >>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
1121
+
1122
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1123
+
1124
+ >>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1125
+
1126
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1127
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1128
+ >>> encoding["bbox"] = bbox
1129
+
1130
+ >>> outputs = model(**encoding)
1131
+ ```"""
1132
+
1133
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1134
+
1135
+ outputs = self.bros(
1136
+ input_ids=input_ids,
1137
+ bbox=bbox,
1138
+ attention_mask=attention_mask,
1139
+ token_type_ids=token_type_ids,
1140
+ position_ids=position_ids,
1141
+ head_mask=head_mask,
1142
+ inputs_embeds=inputs_embeds,
1143
+ output_attentions=output_attentions,
1144
+ output_hidden_states=output_hidden_states,
1145
+ return_dict=return_dict,
1146
+ )
1147
+
1148
+ last_hidden_states = outputs[0]
1149
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
1150
+ initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()
1151
+ subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)
1152
+
1153
+ # make subsequent token (sequence token classification) mask
1154
+ inv_attention_mask = 1 - attention_mask
1155
+ batch_size, max_seq_length = inv_attention_mask.shape
1156
+ device = inv_attention_mask.device
1157
+ invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()
1158
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1159
+ invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min
1160
+ )
1161
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
1162
+ subsequent_token_logits = subsequent_token_logits.masked_fill(
1163
+ self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min
1164
+ )
1165
+ subsequent_token_mask = attention_mask.view(-1).bool()
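+ # Shape/masking note (added for clarity; inferred from BrosRelationExtractor, not part of the original file):
+ # assuming the default `n_relations = 1`, `subsequent_token_logits` has shape (batch, seq_len, seq_len + 1):
+ # for each token it scores which token follows it within the same entity, the extra column standing for the
+ # dummy "no subsequent token" node. Padding key positions and self-links are filled with the dtype minimum
+ # so they are effectively ignored.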
1166
+
1167
+ loss = None
1168
+ if initial_token_labels is not None and subsequent_token_labels is not None:
1169
+ loss_fct = CrossEntropyLoss()
1170
+
1171
+ # get initial token loss
1172
+ initial_token_labels = initial_token_labels.view(-1)
1173
+ if bbox_first_token_mask is not None:
1174
+ bbox_first_token_mask = bbox_first_token_mask.view(-1)
1175
+ initial_token_loss = loss_fct(
1176
+ initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask],
1177
+ initial_token_labels[bbox_first_token_mask],
1178
+ )
1179
+ else:
1180
+ initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)
1181
+
1182
+ subsequent_token_labels = subsequent_token_labels.view(-1)
1183
+ subsequent_token_loss = loss_fct(
1184
+ subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask],
1185
+ subsequent_token_labels[subsequent_token_mask],
1186
+ )
1187
+
1188
+ loss = initial_token_loss + subsequent_token_loss
1189
+
1190
+ if not return_dict:
1191
+ output = (initial_token_logits, subsequent_token_logits) + outputs[2:]
1192
+ return ((loss,) + output) if loss is not None else output
1193
+
1194
+ return BrosSpadeOutput(
1195
+ loss=loss,
1196
+ initial_token_logits=initial_token_logits,
1197
+ subsequent_token_logits=subsequent_token_logits,
1198
+ hidden_states=outputs.hidden_states,
1199
+ attentions=outputs.attentions,
1200
+ )
1201
+
1202
+
1203
+ @add_start_docstrings(
1204
+ """
1205
+ Bros Model with a token classification head on top (an entity_linker layer on top of the hidden-states output) e.g.
1206
+ for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).
1207
+ """,
1208
+ BROS_START_DOCSTRING,
1209
+ )
1210
+ class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
1211
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1212
+
1213
+ def __init__(self, config):
1214
+ super().__init__(config)
1215
+ self.config = config
1216
+ self.num_labels = config.num_labels
1217
+ self.n_relations = config.n_relations
1218
+ self.backbone_hidden_size = config.hidden_size
1219
+
1220
+ self.bros = BrosModel(config)
1221
+ (config.classifier_dropout if hasattr(config, "classifier_dropout") else config.hidden_dropout_prob)
1222
+
1223
+ self.entity_linker = BrosRelationExtractor(config)
1224
+
1225
+ self.init_weights()
1226
+
1227
+ @add_start_docstrings_to_model_forward(BROS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1228
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
1229
+ def forward(
1230
+ self,
1231
+ input_ids: Optional[torch.Tensor] = None,
1232
+ bbox: Optional[torch.Tensor] = None,
1233
+ attention_mask: Optional[torch.Tensor] = None,
1234
+ bbox_first_token_mask: Optional[torch.Tensor] = None,
1235
+ token_type_ids: Optional[torch.Tensor] = None,
1236
+ position_ids: Optional[torch.Tensor] = None,
1237
+ head_mask: Optional[torch.Tensor] = None,
1238
+ inputs_embeds: Optional[torch.Tensor] = None,
1239
+ labels: Optional[torch.Tensor] = None,
1240
+ output_attentions: Optional[bool] = None,
1241
+ output_hidden_states: Optional[bool] = None,
1242
+ return_dict: Optional[bool] = None,
1243
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1244
+ r"""
1245
+ Returns:
1246
+
1247
+ Examples:
1248
+
1249
+ ```python
1250
+ >>> import torch
1251
+ >>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
1252
+
1253
+ >>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
1254
+
1255
+ >>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
1256
+
1257
+ >>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
1258
+ >>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
1259
+ >>> encoding["bbox"] = bbox
1260
+
1261
+ >>> outputs = model(**encoding)
1262
+ ```"""
1263
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1264
+
1265
+ outputs = self.bros(
1266
+ input_ids=input_ids,
1267
+ bbox=bbox,
1268
+ attention_mask=attention_mask,
1269
+ token_type_ids=token_type_ids,
1270
+ position_ids=position_ids,
1271
+ head_mask=head_mask,
1272
+ inputs_embeds=inputs_embeds,
1273
+ output_attentions=output_attentions,
1274
+ output_hidden_states=output_hidden_states,
1275
+ return_dict=return_dict,
1276
+ )
1277
+
1278
+ last_hidden_states = outputs[0]
1279
+ last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
1280
+
1281
+ logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)
1282
+
1283
+ loss = None
1284
+ if labels is not None:
1285
+ loss_fct = CrossEntropyLoss()
1286
+
1287
+ batch_size, max_seq_length = attention_mask.shape
1288
+ device = attention_mask.device
1289
+
1290
+ self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device).bool()
1291
+
1292
+ mask = bbox_first_token_mask.view(-1)
1293
+ bbox_first_token_mask = torch.cat(
1294
+ [
1295
+ ~bbox_first_token_mask,
1296
+ torch.zeros([batch_size, 1], dtype=torch.bool).to(device),
1297
+ ],
1298
+ axis=1,
1299
+ )
1300
+ logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
1301
+ logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
1302
+
1303
+ loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])
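+ # Masking note (added for clarity; an interpretation of the code above): entity linking scores every token
+ # against every candidate target plus the dummy "no link" column; targets that are not the first token of a
+ # box and self-links are filled with the dtype minimum, and the loss is restricted to first-token query
+ # positions via `mask`.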
1304
+
1305
+ if not return_dict:
1306
+ output = (logits,) + outputs[2:]
1307
+ return ((loss,) + output) if loss is not None else output
1308
+
1309
+ return TokenClassifierOutput(
1310
+ loss=loss,
1311
+ logits=logits,
1312
+ hidden_states=outputs.hidden_states,
1313
+ attentions=outputs.attentions,
1314
+ )
1315
+
1316
+
1317
+ __all__ = [
1318
+ "BrosPreTrainedModel",
1319
+ "BrosModel",
1320
+ "BrosForTokenClassification",
1321
+ "BrosSpadeEEForTokenClassification",
1322
+ "BrosSpadeELForTokenClassification",
1323
+ ]
janus/lib/python3.10/site-packages/transformers/models/bros/processing_bros.py ADDED
@@ -0,0 +1,112 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for Bros.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
23
+ from ...utils import TensorType
24
+
25
+
26
+ class BrosProcessor(ProcessorMixin):
27
+ r"""
28
+ Constructs a Bros processor which wraps a BERT tokenizer.
29
+
30
+ [`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
31
+ [`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
32
+
33
+ Args:
34
+ tokenizer (`BertTokenizerFast`, *optional*):
35
+ An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
36
+ """
37
+
38
+ attributes = ["tokenizer"]
39
+ tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
40
+
41
+ def __init__(self, tokenizer=None, **kwargs):
42
+ if tokenizer is None:
43
+ raise ValueError("You need to specify a `tokenizer`.")
44
+
45
+ super().__init__(tokenizer)
46
+
47
+ def __call__(
48
+ self,
49
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
50
+ add_special_tokens: bool = True,
51
+ padding: Union[bool, str, PaddingStrategy] = False,
52
+ truncation: Union[bool, str, TruncationStrategy] = None,
53
+ max_length: Optional[int] = None,
54
+ stride: int = 0,
55
+ pad_to_multiple_of: Optional[int] = None,
56
+ return_token_type_ids: Optional[bool] = None,
57
+ return_attention_mask: Optional[bool] = None,
58
+ return_overflowing_tokens: bool = False,
59
+ return_special_tokens_mask: bool = False,
60
+ return_offsets_mapping: bool = False,
61
+ return_length: bool = False,
62
+ verbose: bool = True,
63
+ return_tensors: Optional[Union[str, TensorType]] = None,
64
+ **kwargs,
65
+ ) -> BatchEncoding:
66
+ """
67
+ This method uses [`BertTokenizerFast.__call__`] to prepare text for the model.
68
+
69
+ Please refer to its docstring for more information.
70
+ """
71
+ encoding = self.tokenizer(
72
+ text=text,
73
+ add_special_tokens=add_special_tokens,
74
+ padding=padding,
75
+ truncation=truncation,
76
+ max_length=max_length,
77
+ stride=stride,
78
+ pad_to_multiple_of=pad_to_multiple_of,
79
+ return_token_type_ids=return_token_type_ids,
80
+ return_attention_mask=return_attention_mask,
81
+ return_overflowing_tokens=return_overflowing_tokens,
82
+ return_special_tokens_mask=return_special_tokens_mask,
83
+ return_offsets_mapping=return_offsets_mapping,
84
+ return_length=return_length,
85
+ verbose=verbose,
86
+ return_tensors=return_tensors,
87
+ **kwargs,
88
+ )
89
+
90
+ return encoding
91
+
92
+ def batch_decode(self, *args, **kwargs):
93
+ """
94
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
95
+ refer to the docstring of this method for more information.
96
+ """
97
+ return self.tokenizer.batch_decode(*args, **kwargs)
98
+
99
+ def decode(self, *args, **kwargs):
100
+ """
101
+ This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
102
+ the docstring of this method for more information.
103
+ """
104
+ return self.tokenizer.decode(*args, **kwargs)
105
+
106
+ @property
107
+ def model_input_names(self):
108
+ tokenizer_input_names = self.tokenizer.model_input_names
109
+ return list(dict.fromkeys(tokenizer_input_names))
110
+
111
+
112
+ __all__ = ["BrosProcessor"]
janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (622 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc ADDED
Binary file (17.5 kB).
 
janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc ADDED
Binary file (9.24 kB).
 
janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc ADDED
Binary file (63.8 kB).
 
janus/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc ADDED
Binary file (12.8 kB).
 
janus/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py ADDED
@@ -0,0 +1,237 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """English Normalizer class for CLVP."""
17
+
18
+ import re
19
+
20
+
21
+ class EnglishNormalizer:
22
+ def __init__(self):
23
+ # List of (regular expression, replacement) pairs for abbreviations:
24
+ self._abbreviations = [
25
+ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
26
+ for x in [
27
+ ("mrs", "misess"),
28
+ ("mr", "mister"),
29
+ ("dr", "doctor"),
30
+ ("st", "saint"),
31
+ ("co", "company"),
32
+ ("jr", "junior"),
33
+ ("maj", "major"),
34
+ ("gen", "general"),
35
+ ("drs", "doctors"),
36
+ ("rev", "reverend"),
37
+ ("lt", "lieutenant"),
38
+ ("hon", "honorable"),
39
+ ("sgt", "sergeant"),
40
+ ("capt", "captain"),
41
+ ("esq", "esquire"),
42
+ ("ltd", "limited"),
43
+ ("col", "colonel"),
44
+ ("ft", "fort"),
45
+ ]
46
+ ]
47
+
48
+ self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
49
+ self.teens = [
50
+ "ten",
51
+ "eleven",
52
+ "twelve",
53
+ "thirteen",
54
+ "fourteen",
55
+ "fifteen",
56
+ "sixteen",
57
+ "seventeen",
58
+ "eighteen",
59
+ "nineteen",
60
+ ]
61
+ self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
62
+
63
+ def number_to_words(self, num: int) -> str:
64
+ """
65
+ Converts numbers(`int`) to words(`str`).
66
+
67
+ Please note that it only supports numbers up to "nine hundred ninety-nine quadrillion, nine hundred ninety-nine
68
+ trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine
69
+ thousand, nine hundred ninety-nine", i.e. `number_to_words(999_999_999_999_999_999)`.
70
+ """
71
+ if num == 0:
72
+ return "zero"
73
+ elif num < 0:
74
+ return "minus " + self.number_to_words(abs(num))
75
+ elif num < 10:
76
+ return self.ones[num]
77
+ elif num < 20:
78
+ return self.teens[num - 10]
79
+ elif num < 100:
80
+ return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
81
+ elif num < 1000:
82
+ return (
83
+ self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
84
+ )
85
+ elif num < 1_000_000:
86
+ return (
87
+ self.number_to_words(num // 1000)
88
+ + " thousand"
89
+ + (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
90
+ )
91
+ elif num < 1_000_000_000:
92
+ return (
93
+ self.number_to_words(num // 1_000_000)
94
+ + " million"
95
+ + (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
96
+ )
97
+ elif num < 1_000_000_000_000:
98
+ return (
99
+ self.number_to_words(num // 1_000_000_000)
100
+ + " billion"
101
+ + (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
102
+ )
103
+ elif num < 1_000_000_000_000_000:
104
+ return (
105
+ self.number_to_words(num // 1_000_000_000_000)
106
+ + " trillion"
107
+ + (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
108
+ )
109
+ elif num < 1_000_000_000_000_000_000:
110
+ return (
111
+ self.number_to_words(num // 1_000_000_000_000_000)
112
+ + " quadrillion"
113
+ + (
114
+ ", " + self.number_to_words(num % 1_000_000_000_000_000)
115
+ if num % 1_000_000_000_000_000 != 0
116
+ else ""
117
+ )
118
+ )
119
+ else:
120
+ return "number out of range"
121
+
122
+ def convert_to_ascii(self, text: str) -> str:
123
+ """
124
+ Converts unicode to ascii
125
+ """
126
+ return text.encode("ascii", "ignore").decode("utf-8")
127
+
128
+ def _expand_dollars(self, m: str) -> str:
129
+ """
130
+ This method is used to expand numerical dollar values into spoken words.
131
+ """
132
+ match = m.group(1)
133
+ parts = match.split(".")
134
+ if len(parts) > 2:
135
+ return match + " dollars" # Unexpected format
136
+
137
+ dollars = int(parts[0]) if parts[0] else 0
138
+ cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
139
+ if dollars and cents:
140
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
141
+ cent_unit = "cent" if cents == 1 else "cents"
142
+ return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
143
+ elif dollars:
144
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
145
+ return "%s %s" % (dollars, dollar_unit)
146
+ elif cents:
147
+ cent_unit = "cent" if cents == 1 else "cents"
148
+ return "%s %s" % (cents, cent_unit)
149
+ else:
150
+ return "zero dollars"
151
+
152
+ def _remove_commas(self, m: str) -> str:
153
+ """
154
+ This method is used to remove commas from sentences.
155
+ """
156
+ return m.group(1).replace(",", "")
157
+
158
+ def _expand_decimal_point(self, m: str) -> str:
159
+ """
160
+ This method is used to expand '.' into spoken word ' point '.
161
+ """
162
+ return m.group(1).replace(".", " point ")
163
+
164
+ def _expand_ordinal(self, num: str) -> str:
165
+ """
166
+ This method is used to expand ordinals such as '1st', '2nd' into spoken words.
167
+ """
168
+ ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
169
+
170
+ num = int(num.group(0)[:-2])
171
+ if 10 <= num % 100 and num % 100 <= 20:
172
+ suffix = "th"
173
+ else:
174
+ suffix = ordinal_suffixes.get(num % 10, "th")
175
+ return self.number_to_words(num) + suffix
176
+
177
+ def _expand_number(self, m: str) -> str:
178
+ """
179
+ This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
180
+ link :
181
+ https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
182
+ """
183
+ num = int(m.group(0))
184
+
185
+ if num > 1000 and num < 3000:
186
+ if num == 2000:
187
+ return "two thousand"
188
+ elif num > 2000 and num < 2010:
189
+ return "two thousand " + self.number_to_words(num % 100)
190
+ elif num % 100 == 0:
191
+ return self.number_to_words(num // 100) + " hundred"
192
+ else:
193
+ return self.number_to_words(num)
194
+ else:
195
+ return self.number_to_words(num)
196
+
197
+ def normalize_numbers(self, text: str) -> str:
198
+ """
199
+ This method is used to normalize numbers within a text such as converting the numbers to words, removing
200
+ commas, etc.
201
+ """
202
+ text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
203
+ text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
204
+ text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
205
+ text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
206
+ text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
207
+ text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
208
+ return text
209
+
210
+ def expand_abbreviations(self, text: str) -> str:
211
+ """
212
+ Expands the abbreviate words.
213
+ """
214
+ for regex, replacement in self._abbreviations:
215
+ text = re.sub(regex, replacement, text)
216
+ return text
217
+
218
+ def collapse_whitespace(self, text: str) -> str:
219
+ """
220
+ Removes multiple whitespaces
221
+ """
222
+ return re.sub(re.compile(r"\s+"), " ", text)
223
+
224
+ def __call__(self, text):
225
+ """
226
+ Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
227
+ abbreviations
228
+ """
229
+
230
+ text = self.convert_to_ascii(text)
231
+ text = text.lower()
232
+ text = self.normalize_numbers(text)
233
+ text = self.expand_abbreviations(text)
234
+ text = self.collapse_whitespace(text)
235
+ text = text.replace('"', "")
236
+
237
+ return text
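+ # End-to-end sketch (illustrative; traced through the regexes and abbreviation table above, not part of the
+ # original file):
+ # >>> EnglishNormalizer()("Mr. Smith paid $5 in 2009")
+ # 'mister smith paid five dollars in two thousand nine'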
janus/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py ADDED
@@ -0,0 +1,93 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Processor class for CLVP
18
+ """
19
+
20
+ from ...processing_utils import ProcessorMixin
21
+
22
+
23
+ class ClvpProcessor(ProcessorMixin):
24
+ r"""
25
+ Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.
26
+
27
+ [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
28
+ [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.
29
+
30
+ Args:
31
+ feature_extractor (`ClvpFeatureExtractor`):
32
+ An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
33
+ tokenizer (`ClvpTokenizer`):
34
+ An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
35
+ """
36
+
37
+ feature_extractor_class = "ClvpFeatureExtractor"
38
+ tokenizer_class = "ClvpTokenizer"
39
+ model_input_names = [
40
+ "input_ids",
41
+ "input_features",
42
+ "attention_mask",
43
+ ]
44
+
45
+ def __init__(self, feature_extractor, tokenizer):
46
+ super().__init__(feature_extractor, tokenizer)
47
+
48
+ def __call__(self, *args, **kwargs):
49
+ """
50
+ Forwards the `audio` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
51
+ argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
52
+ information.
53
+ """
54
+
55
+ raw_speech = kwargs.pop("raw_speech", None)
56
+ sampling_rate = kwargs.pop("sampling_rate", None)
57
+ text = kwargs.pop("text", None)
58
+
59
+ if raw_speech is None and text is None:
60
+ raise ValueError("You need to specify either an `raw_speech` or `text` input to process.")
61
+
62
+ if raw_speech is not None:
63
+ inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
64
+ if text is not None:
65
+ encodings = self.tokenizer(text, **kwargs)
66
+
67
+ if text is None:
68
+ return inputs
69
+ elif raw_speech is None:
70
+ return encodings
71
+ else:
72
+ inputs["input_ids"] = encodings["input_ids"]
73
+ inputs["attention_mask"] = encodings["attention_mask"]
74
+ return inputs
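+ # Usage sketch (illustrative assumption; `waveform` and `sr` are placeholders for an audio array and its
+ # sampling rate):
+ # >>> batch = processor(raw_speech=waveform, sampling_rate=sr, text="hello world", return_tensors="pt")
+ # When both `raw_speech` and `text` are given, the result holds the feature extractor's "input_features"
+ # together with the tokenizer's "input_ids" / "attention_mask".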
75
+
76
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
77
+ def batch_decode(self, *args, **kwargs):
78
+ """
79
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
80
+ refer to the docstring of this method for more information.
81
+ """
82
+ return self.tokenizer.batch_decode(*args, **kwargs)
83
+
84
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
85
+ def decode(self, *args, **kwargs):
86
+ """
87
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
88
+ the docstring of this method for more information.
89
+ """
90
+ return self.tokenizer.decode(*args, **kwargs)
91
+
92
+
93
+ __all__ = ["ClvpProcessor"]
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (582 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py ADDED
@@ -0,0 +1,946 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_dinov2_with_registers.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+
23
+ import collections.abc
24
+ import math
25
+ from typing import Dict, List, Optional, Set, Tuple, Union
26
+
27
+ import torch
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from ...activations import ACT2FN
32
+ from ...modeling_outputs import BackboneOutput, BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import (
36
+ add_code_sample_docstrings,
37
+ add_start_docstrings,
38
+ add_start_docstrings_to_model_forward,
39
+ logging,
40
+ replace_return_docstrings,
41
+ torch_int,
42
+ )
43
+ from ...utils.backbone_utils import BackboneMixin
44
+ from .configuration_dinov2_with_registers import Dinov2WithRegistersConfig
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ # Base docstring
50
+ _CHECKPOINT_FOR_DOC = "facebook/dinov2_with_registers-base"
51
+
52
+ # General docstring
53
+ _CONFIG_FOR_DOC = "Dinov2WithRegistersConfig"
54
+
55
+
56
+ class Dinov2WithRegistersPatchEmbeddings(nn.Module):
57
+ """
58
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
59
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
60
+ Transformer.
61
+ """
62
+
63
+ def __init__(self, config):
64
+ super().__init__()
65
+ image_size, patch_size = config.image_size, config.patch_size
66
+ num_channels, hidden_size = config.num_channels, config.hidden_size
67
+
68
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
69
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
70
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
71
+ self.image_size = image_size
72
+ self.patch_size = patch_size
73
+ self.num_channels = num_channels
74
+ self.num_patches = num_patches
75
+
76
+ self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
77
+
78
+ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
79
+ num_channels = pixel_values.shape[1]
80
+ if num_channels != self.num_channels:
81
+ raise ValueError(
82
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
83
+ f" Expected {self.num_channels} but got {num_channels}."
84
+ )
85
+ embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
86
+ return embeddings
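+ # Shape note (added for clarity, not part of the original file): the Conv2d projection yields
+ # (batch, hidden_size, height // patch_size, width // patch_size); `flatten(2).transpose(1, 2)` turns this
+ # into the (batch, num_patches, hidden_size) sequence expected by the Transformer.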
87
+
88
+
89
+ class Dinov2WithRegistersEmbeddings(nn.Module):
90
+ """
91
+ Construct the CLS token, mask token, register tokens, position and patch embeddings.
92
+ """
93
+
94
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
95
+ super().__init__()
96
+
97
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
98
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
99
+ self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
100
+ self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
101
+ num_patches = self.patch_embeddings.num_patches
102
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
103
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
104
+ self.patch_size = config.patch_size
105
+ self.config = config
106
+
107
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
108
+ """
109
+ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
110
+ resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
111
+ with the original implementation.
112
+
113
+ Adapted from:
114
+ - https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
115
+ - https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
116
+ """
117
+ num_patches = embeddings.shape[1] - 1
118
+ num_positions = self.position_embeddings.shape[1] - 1
119
+
120
+ # Skip interpolation for matching dimensions (unless tracing)
121
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
122
+ return self.position_embeddings
123
+
124
+ # Handle class token and patch embeddings separately
125
+ class_pos_embed = self.position_embeddings[:, 0]
126
+ patch_pos_embed = self.position_embeddings[:, 1:]
127
+ dim = embeddings.shape[-1]
128
+
129
+ # Calculate new dimensions
130
+ height = height // self.config.patch_size
131
+ width = width // self.config.patch_size
132
+
133
+ # Reshape for interpolation
134
+ sqrt_num_positions = torch_int(num_positions**0.5)
135
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
136
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
137
+
138
+ # Store original dtype for restoration after interpolation
139
+ target_dtype = patch_pos_embed.dtype
140
+
141
+ # Interpolate at float32 precision
142
+ patch_pos_embed = nn.functional.interpolate(
143
+ patch_pos_embed.to(dtype=torch.float32),
144
+ size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
145
+ mode="bicubic",
146
+ align_corners=False,
147
+ antialias=True,
148
+ ).to(dtype=target_dtype)
149
+
150
+ # Validate output dimensions if not tracing
151
+ if not torch.jit.is_tracing():
152
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
153
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
154
+
155
+ # Reshape back to original format
156
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
157
+
158
+ # Combine class and patch embeddings
159
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
160
+
161
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
162
+ batch_size, _, height, width = pixel_values.shape
163
+ target_dtype = self.patch_embeddings.projection.weight.dtype
164
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
165
+
166
+ if bool_masked_pos is not None:
167
+ embeddings = torch.where(
168
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
169
+ )
170
+
171
+ # add the [CLS] token to the embedded patch tokens
172
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
173
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
174
+
175
+ # add positional encoding to each token
176
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
177
+
178
+ # add register tokens
179
+ embeddings = torch.cat(
180
+ (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
181
+ )
182
+
183
+ embeddings = self.dropout(embeddings)
184
+
185
+ return embeddings
186
+
187
+
188
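After this forward pass the sequence is laid out as `[CLS, register_1..register_R, patch_1..patch_N]`. A small arithmetic sketch of the resulting sequence length, assuming the default configuration values (224×224 images, 16×16 patches, 4 register tokens):

```python
# Sketch only: sequence length produced by Dinov2WithRegistersEmbeddings.forward
image_size, patch_size = 224, 16                     # defaults in Dinov2WithRegistersConfig
num_register_tokens = 4                              # default in Dinov2WithRegistersConfig
num_patches = (image_size // patch_size) ** 2        # 196
seq_length = 1 + num_register_tokens + num_patches   # CLS + registers + patches
print(seq_length)  # 201
```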
+ class Dinov2WithRegistersSelfAttention(nn.Module):
189
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
190
+ super().__init__()
191
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
192
+ raise ValueError(
193
+ f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
194
+ f"heads {config.num_attention_heads}."
195
+ )
196
+
197
+ self.num_attention_heads = config.num_attention_heads
198
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
199
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
200
+
201
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
202
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
203
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
204
+
205
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
206
+
207
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
208
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
209
+ x = x.view(new_x_shape)
210
+ return x.permute(0, 2, 1, 3)
211
+
212
+ def forward(
213
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
214
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
215
+ mixed_query_layer = self.query(hidden_states)
216
+
217
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
218
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
219
+ query_layer = self.transpose_for_scores(mixed_query_layer)
220
+
221
+ # Take the dot product between "query" and "key" to get the raw attention scores.
222
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
223
+
224
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
225
+
226
+ # Normalize the attention scores to probabilities.
227
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
228
+
229
+ # This is actually dropping out entire tokens to attend to, which might
230
+ # seem a bit unusual, but is taken from the original Transformer paper.
231
+ attention_probs = self.dropout(attention_probs)
232
+
233
+ # Mask heads if we want to
234
+ if head_mask is not None:
235
+ attention_probs = attention_probs * head_mask
236
+
237
+ context_layer = torch.matmul(attention_probs, value_layer)
238
+
239
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
240
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
241
+ context_layer = context_layer.view(new_context_layer_shape)
242
+
243
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
244
+
245
+ return outputs
246
+
247
+
248
+ class Dinov2WithRegistersSdpaSelfAttention(Dinov2WithRegistersSelfAttention):
249
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
250
+ super().__init__(config)
251
+ self.attention_probs_dropout_prob = config.attention_probs_dropout_prob
252
+
253
+ def forward(
254
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
255
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
256
+ if output_attentions:
257
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
258
+ logger.warning_once(
259
+ "Dinov2WithRegistersModel is using Dinov2WithRegistersSdpaSelfAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
260
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
261
+ )
262
+ return super().forward(
263
+ hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions
264
+ )
265
+
266
+ mixed_query_layer = self.query(hidden_states)
267
+
268
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
269
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
270
+ query_layer = self.transpose_for_scores(mixed_query_layer)
271
+
272
+ context_layer = torch.nn.functional.scaled_dot_product_attention(
273
+ query_layer,
274
+ key_layer,
275
+ value_layer,
276
+ head_mask,
277
+ self.attention_probs_dropout_prob if self.training else 0.0,
278
+ is_causal=False,
279
+ scale=None,
280
+ )
281
+
282
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
283
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
284
+ context_layer = context_layer.view(new_context_layer_shape)
285
+
286
+ return context_layer, None
287
+
288
+
289
+ class Dinov2WithRegistersSelfOutput(nn.Module):
290
+ """
291
+ The residual connection is defined in Dinov2WithRegistersLayer instead of here (as is the case with other models), due to the
292
+ layernorm applied before each block.
293
+ """
294
+
295
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
296
+ super().__init__()
297
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
298
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
299
+
300
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
301
+ hidden_states = self.dense(hidden_states)
302
+ hidden_states = self.dropout(hidden_states)
303
+
304
+ return hidden_states
305
+
306
+
307
+ class Dinov2WithRegistersAttention(nn.Module):
308
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
309
+ super().__init__()
310
+ self.attention = Dinov2WithRegistersSelfAttention(config)
311
+ self.output = Dinov2WithRegistersSelfOutput(config)
312
+ self.pruned_heads = set()
313
+
314
+ def prune_heads(self, heads: Set[int]) -> None:
315
+ if len(heads) == 0:
316
+ return
317
+ heads, index = find_pruneable_heads_and_indices(
318
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
319
+ )
320
+
321
+ # Prune linear layers
322
+ self.attention.query = prune_linear_layer(self.attention.query, index)
323
+ self.attention.key = prune_linear_layer(self.attention.key, index)
324
+ self.attention.value = prune_linear_layer(self.attention.value, index)
325
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
326
+
327
+ # Update hyper params and store pruned heads
328
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
329
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
330
+ self.pruned_heads = self.pruned_heads.union(heads)
331
+
332
+ def forward(
333
+ self,
334
+ hidden_states: torch.Tensor,
335
+ head_mask: Optional[torch.Tensor] = None,
336
+ output_attentions: bool = False,
337
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
338
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
339
+
340
+ attention_output = self.output(self_outputs[0], hidden_states)
341
+
342
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
343
+ return outputs
344
+
345
+
346
+ class Dinov2WithRegistersSdpaAttention(Dinov2WithRegistersAttention):
347
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
348
+ super().__init__(config)
349
+ self.attention = Dinov2WithRegistersSdpaSelfAttention(config)
350
+
351
+
352
+ class Dinov2WithRegistersLayerScale(nn.Module):
353
+ def __init__(self, config) -> None:
354
+ super().__init__()
355
+ self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
356
+
357
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
358
+ return hidden_state * self.lambda1
359
+
360
+
361
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
362
+ """
363
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
364
+
365
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
366
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
367
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
368
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
369
+ argument.
370
+ """
371
+ if drop_prob == 0.0 or not training:
372
+ return input
373
+ keep_prob = 1 - drop_prob
374
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
375
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
376
+ random_tensor.floor_() # binarize
377
+ output = input.div(keep_prob) * random_tensor
378
+ return output
379
+
380
+
381
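As a rough numerical illustration of the stochastic-depth behaviour described in the docstring above (not part of the library code): zeroing whole samples with probability `drop_prob` and rescaling survivors by `1 / keep_prob` keeps the expected output equal to the input.

```python
import torch

def drop_path_demo(x: torch.Tensor, drop_prob: float) -> torch.Tensor:
    # Same logic as drop_path above: one Bernoulli draw per sample, then rescale.
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (keep_prob + torch.rand(shape)).floor_()  # 0 or 1 per sample
    return x.div(keep_prob) * mask

x = torch.ones(10_000, 4)
out = drop_path_demo(x, drop_prob=0.2)
# About 20% of rows are zeroed; surviving rows are scaled by 1/0.8 = 1.25,
# so the overall mean stays close to 1.0.
print(out.mean().item())
```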
+ class Dinov2WithRegistersDropPath(nn.Module):
382
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
383
+
384
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
385
+ super().__init__()
386
+ self.drop_prob = drop_prob
387
+
388
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
389
+ return drop_path(hidden_states, self.drop_prob, self.training)
390
+
391
+ def extra_repr(self) -> str:
392
+ return "p={}".format(self.drop_prob)
393
+
394
+
395
+ class Dinov2WithRegistersMLP(nn.Module):
396
+ def __init__(self, config) -> None:
397
+ super().__init__()
398
+ in_features = out_features = config.hidden_size
399
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
400
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
401
+ if isinstance(config.hidden_act, str):
402
+ self.activation = ACT2FN[config.hidden_act]
403
+ else:
404
+ self.activation = config.hidden_act
405
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=True)
406
+
407
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
408
+ hidden_state = self.fc1(hidden_state)
409
+ hidden_state = self.activation(hidden_state)
410
+ hidden_state = self.fc2(hidden_state)
411
+ return hidden_state
412
+
413
+
414
+ class Dinov2WithRegistersSwiGLUFFN(nn.Module):
415
+ def __init__(self, config) -> None:
416
+ super().__init__()
417
+ in_features = out_features = config.hidden_size
418
+ hidden_features = int(config.hidden_size * config.mlp_ratio)
419
+ hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
420
+
421
+ self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True)
422
+ self.weights_out = nn.Linear(hidden_features, out_features, bias=True)
423
+
424
+ def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
425
+ hidden_state = self.weights_in(hidden_state)
426
+ x1, x2 = hidden_state.chunk(2, dim=-1)
427
+ hidden = nn.functional.silu(x1) * x2
428
+ return self.weights_out(hidden)
429
+
430
+
431
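The `hidden_features` expression above shrinks the MLP width by two thirds (to account for the gated branch) and rounds up to a multiple of 8. A worked example with the default `hidden_size=768` and `mlp_ratio=4`:

```python
hidden_size, mlp_ratio = 768, 4
hidden_features = int(hidden_size * mlp_ratio)                  # 3072
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8   # round 2048 up to a multiple of 8
print(hidden_features)  # 2048
```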
+ DINOV2_WITH_REGISTERS_ATTENTION_CLASSES = {
432
+ "eager": Dinov2WithRegistersAttention,
433
+ "sdpa": Dinov2WithRegistersSdpaAttention,
434
+ }
435
+
436
+
437
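The mapping above is indexed by `config._attn_implementation`. As a hedged sketch (the `attn_implementation` keyword is the generic Transformers loading argument, not something specific to this model), a user who needs attention weights would typically force the eager path:

```python
from transformers import AutoModel

# Request the eager (manual) attention classes, e.g. when output_attentions=True is needed.
model = AutoModel.from_pretrained(
    "facebook/dinov2-with-registers-base", attn_implementation="eager"
)
```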
+ class Dinov2WithRegistersLayer(nn.Module):
438
+ """This corresponds to the Block class in the original implementation."""
439
+
440
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
441
+ super().__init__()
442
+
443
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
444
+ self.attention = DINOV2_WITH_REGISTERS_ATTENTION_CLASSES[config._attn_implementation](config)
445
+ self.layer_scale1 = Dinov2WithRegistersLayerScale(config)
446
+ self.drop_path = (
447
+ Dinov2WithRegistersDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
448
+ )
449
+
450
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
451
+
452
+ if config.use_swiglu_ffn:
453
+ self.mlp = Dinov2WithRegistersSwiGLUFFN(config)
454
+ else:
455
+ self.mlp = Dinov2WithRegistersMLP(config)
456
+ self.layer_scale2 = Dinov2WithRegistersLayerScale(config)
457
+
458
+ def forward(
459
+ self,
460
+ hidden_states: torch.Tensor,
461
+ head_mask: Optional[torch.Tensor] = None,
462
+ output_attentions: bool = False,
463
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
464
+ self_attention_outputs = self.attention(
465
+ self.norm1(hidden_states), # in Dinov2WithRegisters, layernorm is applied before self-attention
466
+ head_mask,
467
+ output_attentions=output_attentions,
468
+ )
469
+ attention_output = self_attention_outputs[0]
470
+
471
+ attention_output = self.layer_scale1(attention_output)
472
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
473
+
474
+ # first residual connection
475
+ hidden_states = self.drop_path(attention_output) + hidden_states
476
+
477
+ # in Dinov2WithRegisters, layernorm is also applied after self-attention
478
+ layer_output = self.norm2(hidden_states)
479
+ layer_output = self.mlp(layer_output)
480
+ layer_output = self.layer_scale2(layer_output)
481
+
482
+ # second residual connection
483
+ layer_output = self.drop_path(layer_output) + hidden_states
484
+
485
+ outputs = (layer_output,) + outputs
486
+
487
+ return outputs
488
+
489
+
490
+ class Dinov2WithRegistersEncoder(nn.Module):
491
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
492
+ super().__init__()
493
+ self.config = config
494
+ self.layer = nn.ModuleList([Dinov2WithRegistersLayer(config) for _ in range(config.num_hidden_layers)])
495
+ self.gradient_checkpointing = False
496
+
497
+ def forward(
498
+ self,
499
+ hidden_states: torch.Tensor,
500
+ head_mask: Optional[torch.Tensor] = None,
501
+ output_attentions: bool = False,
502
+ output_hidden_states: bool = False,
503
+ return_dict: bool = True,
504
+ ) -> Union[tuple, BaseModelOutput]:
505
+ all_hidden_states = () if output_hidden_states else None
506
+ all_self_attentions = () if output_attentions else None
507
+
508
+ for i, layer_module in enumerate(self.layer):
509
+ if output_hidden_states:
510
+ all_hidden_states = all_hidden_states + (hidden_states,)
511
+
512
+ layer_head_mask = head_mask[i] if head_mask is not None else None
513
+
514
+ if self.gradient_checkpointing and self.training:
515
+ layer_outputs = self._gradient_checkpointing_func(
516
+ layer_module.__call__,
517
+ hidden_states,
518
+ layer_head_mask,
519
+ output_attentions,
520
+ )
521
+ else:
522
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
523
+
524
+ hidden_states = layer_outputs[0]
525
+
526
+ if output_attentions:
527
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
528
+
529
+ if output_hidden_states:
530
+ all_hidden_states = all_hidden_states + (hidden_states,)
531
+
532
+ if not return_dict:
533
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
534
+ return BaseModelOutput(
535
+ last_hidden_state=hidden_states,
536
+ hidden_states=all_hidden_states,
537
+ attentions=all_self_attentions,
538
+ )
539
+
540
+
541
+ class Dinov2WithRegistersPreTrainedModel(PreTrainedModel):
542
+ """
543
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
544
+ models.
545
+ """
546
+
547
+ config_class = Dinov2WithRegistersConfig
548
+ base_model_prefix = "dinov2_with_registers"
549
+ main_input_name = "pixel_values"
550
+ supports_gradient_checkpointing = True
551
+ _no_split_modules = ["Dinov2WithRegistersSwiGLUFFN"]
552
+ _supports_sdpa = True
553
+
554
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
555
+ """Initialize the weights"""
556
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
557
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
558
+ # `trunc_normal_cpu` not implemented in `half` issues
559
+ module.weight.data = nn.init.trunc_normal_(
560
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
561
+ ).to(module.weight.dtype)
562
+ if module.bias is not None:
563
+ module.bias.data.zero_()
564
+ elif isinstance(module, nn.LayerNorm):
565
+ module.bias.data.zero_()
566
+ module.weight.data.fill_(1.0)
567
+ elif isinstance(module, Dinov2WithRegistersEmbeddings):
568
+ module.position_embeddings.data = nn.init.trunc_normal_(
569
+ module.position_embeddings.data.to(torch.float32),
570
+ mean=0.0,
571
+ std=self.config.initializer_range,
572
+ ).to(module.position_embeddings.dtype)
573
+
574
+ module.cls_token.data = nn.init.trunc_normal_(
575
+ module.cls_token.data.to(torch.float32),
576
+ mean=0.0,
577
+ std=self.config.initializer_range,
578
+ ).to(module.cls_token.dtype)
579
+
580
+
581
+ _EXPECTED_OUTPUT_SHAPE = [1, 257, 768]
582
+
583
+
584
+ DINOV2_WITH_REGISTERS_START_DOCSTRING = r"""
585
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
586
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
587
+ behavior.
588
+
589
+ Parameters:
590
+ config ([`Dinov2WithRegistersConfig`]): Model configuration class with all the parameters of the model.
591
+ Initializing with a config file does not load the weights associated with the model, only the
592
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
593
+ """
594
+
595
+ DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING = r"""
596
+ Args:
597
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
598
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
599
+ [`BitImageProcessor.preprocess`] for details.
600
+
601
+ bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
602
+ Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
603
+ pre-training.
604
+
605
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
606
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
607
+
608
+ - 1 indicates the head is **not masked**,
609
+ - 0 indicates the head is **masked**.
610
+
611
+ output_attentions (`bool`, *optional*):
612
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
613
+ tensors for more detail.
614
+ output_hidden_states (`bool`, *optional*):
615
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
616
+ more detail.
617
+ return_dict (`bool`, *optional*):
618
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
619
+ """
620
+
621
+
622
+ @add_start_docstrings(
623
+ "The bare Dinov2WithRegisters Model transformer outputting raw hidden-states without any specific head on top.",
624
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
625
+ )
626
+ class Dinov2WithRegistersModel(Dinov2WithRegistersPreTrainedModel):
627
+ def __init__(self, config: Dinov2WithRegistersConfig):
628
+ super().__init__(config)
629
+ self.config = config
630
+
631
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
632
+ self.encoder = Dinov2WithRegistersEncoder(config)
633
+
634
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
635
+
636
+ # Initialize weights and apply final processing
637
+ self.post_init()
638
+
639
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
640
+ return self.embeddings.patch_embeddings
641
+
642
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
643
+ """
644
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
645
+ class PreTrainedModel
646
+ """
647
+ for layer, heads in heads_to_prune.items():
648
+ self.encoder.layer[layer].attention.prune_heads(heads)
649
+
650
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_BASE_INPUTS_DOCSTRING)
651
+ @add_code_sample_docstrings(
652
+ checkpoint=_CHECKPOINT_FOR_DOC,
653
+ output_type=BaseModelOutputWithPooling,
654
+ config_class=_CONFIG_FOR_DOC,
655
+ modality="vision",
656
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
657
+ )
658
+ def forward(
659
+ self,
660
+ pixel_values: Optional[torch.Tensor] = None,
661
+ bool_masked_pos: Optional[torch.Tensor] = None,
662
+ head_mask: Optional[torch.Tensor] = None,
663
+ output_attentions: Optional[bool] = None,
664
+ output_hidden_states: Optional[bool] = None,
665
+ return_dict: Optional[bool] = None,
666
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
667
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
668
+ output_hidden_states = (
669
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
670
+ )
671
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
672
+
673
+ if pixel_values is None:
674
+ raise ValueError("You have to specify pixel_values")
675
+
676
+ # Prepare head mask if needed
677
+ # 1.0 in head_mask indicate we keep the head
678
+ # attention_probs has shape bsz x n_heads x N x N
679
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
680
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
681
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
682
+
683
+ embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
684
+
685
+ encoder_outputs = self.encoder(
686
+ embedding_output,
687
+ head_mask=head_mask,
688
+ output_attentions=output_attentions,
689
+ output_hidden_states=output_hidden_states,
690
+ return_dict=return_dict,
691
+ )
692
+ sequence_output = encoder_outputs[0]
693
+ sequence_output = self.layernorm(sequence_output)
694
+ pooled_output = sequence_output[:, 0, :]
695
+
696
+ if not return_dict:
697
+ head_outputs = (sequence_output, pooled_output)
698
+ return head_outputs + encoder_outputs[1:]
699
+
700
+ return BaseModelOutputWithPooling(
701
+ last_hidden_state=sequence_output,
702
+ pooler_output=pooled_output,
703
+ hidden_states=encoder_outputs.hidden_states,
704
+ attentions=encoder_outputs.attentions,
705
+ )
706
+
707
+
708
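A minimal usage sketch for the model defined above, instantiated with random weights from the default configuration so no checkpoint download is involved (the class names come from this file; availability of the top-level import depends on the installed `transformers` version):

```python
import torch
from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel

config = Dinov2WithRegistersConfig()   # defaults: 224x224 images, 16x16 patches, 4 register tokens
model = Dinov2WithRegistersModel(config)
model.eval()

pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
with torch.no_grad():
    outputs = model(pixel_values)

# last_hidden_state: (batch, 1 + num_register_tokens + num_patches, hidden_size)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 201, 768]) with the defaults above
```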
+ # Image classification docstring
709
+ _IMAGE_CLASS_CHECKPOINT = "facebook/dinov2_with_registers-small-imagenet1k-1-layer"
710
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
711
+
712
+ DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING = r"""
713
+ Args:
714
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
715
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
716
+ [`BitImageProcessor.preprocess`] for details.
717
+
718
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
719
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
720
+
721
+ - 1 indicates the head is **not masked**,
722
+ - 0 indicates the head is **masked**.
723
+
724
+ output_attentions (`bool`, *optional*):
725
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
726
+ tensors for more detail.
727
+ output_hidden_states (`bool`, *optional*):
728
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
729
+ more detail.
730
+ return_dict (`bool`, *optional*):
731
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
732
+ """
733
+
734
+
735
+ @add_start_docstrings(
736
+ """
737
+ Dinov2WithRegisters Model transformer with an image classification head on top (a linear layer on top of the final hidden state
738
+ of the [CLS] token) e.g. for ImageNet.
739
+ """,
740
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
741
+ )
742
+ class Dinov2WithRegistersForImageClassification(Dinov2WithRegistersPreTrainedModel):
743
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
744
+ super().__init__(config)
745
+
746
+ self.num_labels = config.num_labels
747
+ self.dinov2_with_registers = Dinov2WithRegistersModel(config)
748
+
749
+ # Classifier head
750
+ self.classifier = (
751
+ nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
752
+ )
753
+
754
+ # Initialize weights and apply final processing
755
+ self.post_init()
756
+
757
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
758
+ @add_code_sample_docstrings(
759
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
760
+ output_type=ImageClassifierOutput,
761
+ config_class=_CONFIG_FOR_DOC,
762
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
763
+ )
764
+ def forward(
765
+ self,
766
+ pixel_values: Optional[torch.Tensor] = None,
767
+ head_mask: Optional[torch.Tensor] = None,
768
+ labels: Optional[torch.Tensor] = None,
769
+ output_attentions: Optional[bool] = None,
770
+ output_hidden_states: Optional[bool] = None,
771
+ return_dict: Optional[bool] = None,
772
+ ) -> Union[tuple, ImageClassifierOutput]:
773
+ r"""
774
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
775
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
776
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
777
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
778
+ """
779
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
780
+
781
+ outputs = self.dinov2_with_registers(
782
+ pixel_values,
783
+ head_mask=head_mask,
784
+ output_attentions=output_attentions,
785
+ output_hidden_states=output_hidden_states,
786
+ return_dict=return_dict,
787
+ )
788
+
789
+ sequence_output = outputs[0] # batch_size, sequence_length, hidden_size
790
+
791
+ cls_token = sequence_output[:, 0]
792
+ patch_tokens = sequence_output[:, 1:]
793
+
794
+ linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
795
+
796
+ logits = self.classifier(linear_input)
797
+
798
+ loss = None
799
+ if labels is not None:
800
+ # move labels to correct device to enable model parallelism
801
+ labels = labels.to(logits.device)
802
+ if self.config.problem_type is None:
803
+ if self.num_labels == 1:
804
+ self.config.problem_type = "regression"
805
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
806
+ self.config.problem_type = "single_label_classification"
807
+ else:
808
+ self.config.problem_type = "multi_label_classification"
809
+
810
+ if self.config.problem_type == "regression":
811
+ loss_fct = MSELoss()
812
+ if self.num_labels == 1:
813
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
814
+ else:
815
+ loss = loss_fct(logits, labels)
816
+ elif self.config.problem_type == "single_label_classification":
817
+ loss_fct = CrossEntropyLoss()
818
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
819
+ elif self.config.problem_type == "multi_label_classification":
820
+ loss_fct = BCEWithLogitsLoss()
821
+ loss = loss_fct(logits, labels)
822
+
823
+ if not return_dict:
824
+ output = (logits,) + outputs[2:]
825
+ return ((loss,) + output) if loss is not None else output
826
+
827
+ return ImageClassifierOutput(
828
+ loss=loss,
829
+ logits=logits,
830
+ hidden_states=outputs.hidden_states,
831
+ attentions=outputs.attentions,
832
+ )
833
+
834
+
835
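The classifier above takes `config.hidden_size * 2` input features because it is fed the [CLS] token concatenated with the mean of all remaining tokens (register tokens included). A small shape sketch of that pooling step, with illustrative sizes:

```python
import torch

batch_size, seq_len, hidden_size = 2, 201, 768
sequence_output = torch.randn(batch_size, seq_len, hidden_size)

cls_token = sequence_output[:, 0]        # (2, 768)
patch_tokens = sequence_output[:, 1:]    # (2, 200, 768) -- register tokens are part of the mean
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
print(linear_input.shape)                # torch.Size([2, 1536])
```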
+ @add_start_docstrings(
836
+ """
837
+ Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.
838
+ """,
839
+ DINOV2_WITH_REGISTERS_START_DOCSTRING,
840
+ )
841
+ class Dinov2WithRegistersBackbone(Dinov2WithRegistersPreTrainedModel, BackboneMixin):
842
+ def __init__(self, config):
843
+ super().__init__(config)
844
+ super()._init_backbone(config)
845
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
846
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
847
+ self.encoder = Dinov2WithRegistersEncoder(config)
848
+
849
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
850
+
851
+ self.num_register_tokens = config.num_register_tokens
852
+
853
+ # Initialize weights and apply final processing
854
+ self.post_init()
855
+
856
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
857
+ return self.embeddings.patch_embeddings
858
+
859
+ @add_start_docstrings_to_model_forward(DINOV2_WITH_REGISTERS_INPUTS_DOCSTRING)
860
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
861
+ def forward(
862
+ self,
863
+ pixel_values: torch.Tensor,
864
+ output_hidden_states: Optional[bool] = None,
865
+ output_attentions: Optional[bool] = None,
866
+ return_dict: Optional[bool] = None,
867
+ ) -> BackboneOutput:
868
+ """
869
+ Returns:
870
+
871
+ Examples:
876
+
877
+ ```python
878
+ >>> from transformers import AutoImageProcessor, AutoBackbone
879
+ >>> import torch
880
+ >>> from PIL import Image
881
+ >>> import requests
882
+
883
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
884
+ >>> image = Image.open(requests.get(url, stream=True).raw)
885
+
886
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
887
+ >>> model = AutoBackbone.from_pretrained(
888
+ ... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
889
+ ... )
890
+
891
+ >>> inputs = processor(image, return_tensors="pt")
892
+
893
+ >>> outputs = model(**inputs)
894
+ >>> feature_maps = outputs.feature_maps
895
+ >>> list(feature_maps[-1].shape)
896
+ [1, 768, 16, 16]
897
+ ```"""
898
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
899
+ output_hidden_states = (
900
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
901
+ )
902
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
903
+
904
+ embedding_output = self.embeddings(pixel_values)
905
+
906
+ outputs = self.encoder(
907
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
908
+ )
909
+
910
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
911
+
912
+ feature_maps = ()
913
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
914
+ if stage in self.out_features:
915
+ if self.config.apply_layernorm:
916
+ hidden_state = self.layernorm(hidden_state)
917
+ if self.config.reshape_hidden_states:
918
+ hidden_state = hidden_state[:, self.num_register_tokens + 1 :]
919
+ # this was actually a bug in the original implementation that we copied here,
920
+ # because normally the order is height, width
921
+ batch_size, _, height, width = pixel_values.shape
922
+ patch_size = self.config.patch_size
923
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
924
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
925
+ feature_maps += (hidden_state,)
926
+
927
+ if not return_dict:
928
+ if output_hidden_states:
929
+ output = (feature_maps,) + outputs[1:]
930
+ else:
931
+ output = (feature_maps,) + outputs[2:]
932
+ return output
933
+
934
+ return BackboneOutput(
935
+ feature_maps=feature_maps,
936
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
937
+ attentions=outputs.attentions if output_attentions else None,
938
+ )
939
+
940
+
941
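A sketch of the token-to-feature-map conversion performed in the loop above: the [CLS] and register tokens are dropped, and the remaining patch tokens are folded back into a `(batch, hidden, height/patch, width/patch)` grid. Sizes below are illustrative, not tied to a checkpoint:

```python
import torch

batch_size, hidden_size = 1, 768
height = width = 224
patch_size, num_register_tokens = 16, 4
num_patches = (height // patch_size) * (width // patch_size)    # 196

hidden_state = torch.randn(batch_size, 1 + num_register_tokens + num_patches, hidden_size)

# Drop CLS + register tokens, then reshape the patch tokens into a 2D grid.
patch_tokens = hidden_state[:, num_register_tokens + 1 :]
feature_map = patch_tokens.reshape(batch_size, height // patch_size, width // patch_size, -1)
feature_map = feature_map.permute(0, 3, 1, 2).contiguous()
print(feature_map.shape)  # torch.Size([1, 768, 14, 14])
```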
+ __all__ = [
942
+ "Dinov2WithRegistersPreTrainedModel",
943
+ "Dinov2WithRegistersModel",
944
+ "Dinov2WithRegistersForImageClassification",
945
+ "Dinov2WithRegistersBackbone",
946
+ ]
janus/lib/python3.10/site-packages/transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py ADDED
@@ -0,0 +1,391 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Meta Inc. and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from typing import Optional
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+
23
+ from ....transformers.models.dinov2.modeling_dinov2 import (
24
+ Dinov2Backbone,
25
+ Dinov2Encoder,
26
+ Dinov2ForImageClassification,
27
+ Dinov2Model,
28
+ Dinov2PatchEmbeddings,
29
+ Dinov2PreTrainedModel,
30
+ )
31
+ from ...configuration_utils import PretrainedConfig
32
+ from ...modeling_outputs import BackboneOutput
33
+ from ...utils import logging, torch_int
34
+ from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ class Dinov2WithRegistersConfig(BackboneConfigMixin, PretrainedConfig):
41
+ r"""
42
+ This is the configuration class to store the configuration of a [`Dinov2WithRegistersModel`]. It is used to instantiate an
43
+ Dinov2WithRegisters model according to the specified arguments, defining the model architecture. Instantiating a configuration
44
+ with the defaults will yield a similar configuration to that of the DINOv2 with Registers
45
+ [facebook/dinov2-with-registers-base](https://huggingface.co/facebook/dinov2-with-registers-base) architecture.
46
+
47
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
48
+ documentation from [`PretrainedConfig`] for more information.
49
+
50
+ Args:
51
+ hidden_size (`int`, *optional*, defaults to 768):
52
+ Dimensionality of the encoder layers and the pooler layer.
53
+ num_hidden_layers (`int`, *optional*, defaults to 12):
54
+ Number of hidden layers in the Transformer encoder.
55
+ num_attention_heads (`int`, *optional*, defaults to 12):
56
+ Number of attention heads for each attention layer in the Transformer encoder.
57
+ mlp_ratio (`int`, *optional*, defaults to 4):
58
+ Ratio of the hidden size of the MLPs relative to the `hidden_size`.
59
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
60
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
61
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
62
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
63
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
64
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
65
+ The dropout ratio for the attention probabilities.
66
+ initializer_range (`float`, *optional*, defaults to 0.02):
67
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
68
+ layer_norm_eps (`float`, *optional*, defaults to 1e-06):
69
+ The epsilon used by the layer normalization layers.
70
+ image_size (`int`, *optional*, defaults to 224):
71
+ The size (resolution) of each image.
72
+ patch_size (`int`, *optional*, defaults to 16):
73
+ The size (resolution) of each patch.
74
+ num_channels (`int`, *optional*, defaults to 3):
75
+ The number of input channels.
76
+ qkv_bias (`bool`, *optional*, defaults to `True`):
77
+ Whether to add a bias to the queries, keys and values.
78
+ layerscale_value (`float`, *optional*, defaults to 1.0):
79
+ Initial value to use for layer scale.
80
+ drop_path_rate (`float`, *optional*, defaults to 0.0):
81
+ Stochastic depth rate per sample (when applied in the main path of residual layers).
82
+ use_swiglu_ffn (`bool`, *optional*, defaults to `False`):
83
+ Whether to use the SwiGLU feedforward neural network.
84
+ num_register_tokens (`int`, *optional*, defaults to 4):
85
+ Number of register tokens to use.
86
+ out_features (`List[str]`, *optional*):
87
+ If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
88
+ (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
89
+ corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
90
+ same order as defined in the `stage_names` attribute.
91
+ out_indices (`List[int]`, *optional*):
92
+ If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
93
+ many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
94
+ If unset and `out_features` is unset, will default to the last stage. Must be in the
95
+ same order as defined in the `stage_names` attribute.
96
+ apply_layernorm (`bool`, *optional*, defaults to `True`):
97
+ Whether to apply layer normalization to the feature maps in case the model is used as backbone.
98
+ reshape_hidden_states (`bool`, *optional*, defaults to `True`):
99
+ Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
100
+ case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
101
+ seq_len, hidden_size)`.
102
+
103
+ Example:
104
+
105
+ ```python
106
+ >>> from transformers import Dinov2WithRegistersConfig, Dinov2WithRegistersModel
107
+
108
+ >>> # Initializing a Dinov2WithRegisters base style configuration
109
+ >>> configuration = Dinov2WithRegistersConfig()
110
+
111
+ >>> # Initializing a model (with random weights) from the base style configuration
112
+ >>> model = Dinov2WithRegistersModel(configuration)
113
+
114
+ >>> # Accessing the model configuration
115
+ >>> configuration = model.config
116
+ ```"""
117
+
118
+ model_type = "dinov2_with_registers"
119
+
120
+ def __init__(
121
+ self,
122
+ hidden_size=768,
123
+ num_hidden_layers=12,
124
+ num_attention_heads=12,
125
+ mlp_ratio=4,
126
+ hidden_act="gelu",
127
+ hidden_dropout_prob=0.0,
128
+ attention_probs_dropout_prob=0.0,
129
+ initializer_range=0.02,
130
+ layer_norm_eps=1e-6,
131
+ image_size=224,
132
+ patch_size=16,
133
+ num_channels=3,
134
+ qkv_bias=True,
135
+ layerscale_value=1.0,
136
+ drop_path_rate=0.0,
137
+ use_swiglu_ffn=False,
138
+ num_register_tokens=4,
139
+ out_features=None,
140
+ out_indices=None,
141
+ apply_layernorm=True,
142
+ reshape_hidden_states=True,
143
+ **kwargs,
144
+ ):
145
+ super().__init__(**kwargs)
146
+
147
+ self.hidden_size = hidden_size
148
+ self.num_hidden_layers = num_hidden_layers
149
+ self.num_attention_heads = num_attention_heads
150
+ self.mlp_ratio = mlp_ratio
151
+ self.hidden_act = hidden_act
152
+ self.hidden_dropout_prob = hidden_dropout_prob
153
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
154
+ self.initializer_range = initializer_range
155
+ self.layer_norm_eps = layer_norm_eps
156
+ self.image_size = image_size
157
+ self.patch_size = patch_size
158
+ self.num_channels = num_channels
159
+ self.qkv_bias = qkv_bias
160
+ self.layerscale_value = layerscale_value
161
+ self.drop_path_rate = drop_path_rate
162
+ self.use_swiglu_ffn = use_swiglu_ffn
163
+ self.num_register_tokens = num_register_tokens
164
+ self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_hidden_layers + 1)]
165
+ self._out_features, self._out_indices = get_aligned_output_features_output_indices(
166
+ out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
167
+ )
168
+ self.apply_layernorm = apply_layernorm
169
+ self.reshape_hidden_states = reshape_hidden_states
170
+
171
+
172
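The `stage_names` / `out_features` / `out_indices` bookkeeping above is what lets the model act as a backbone. A hedged sketch of how the fields line up (assuming a `transformers` version that exports this config):

```python
from transformers import Dinov2WithRegistersConfig

config = Dinov2WithRegistersConfig(out_features=["stage2", "stage5", "stage8", "stage11"])
print(config.stage_names[:3])  # ['stem', 'stage1', 'stage2']
print(config.out_features)     # ['stage2', 'stage5', 'stage8', 'stage11']
print(config.out_indices)      # indices aligned with stage_names: 2, 5, 8, 11
```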
+ class Dinov2WithRegistersPatchEmbeddings(Dinov2PatchEmbeddings):
173
+ pass
174
+
175
+
176
+ class Dinov2WithRegistersEmbeddings(nn.Module):
177
+ """
178
+ Construct the CLS token, mask token, register tokens, position and patch embeddings.
179
+ """
180
+
181
+ def __init__(self, config: Dinov2WithRegistersConfig) -> None:
182
+ super().__init__()
183
+
184
+ self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
185
+ self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size))
186
+ self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
187
+ self.patch_embeddings = Dinov2WithRegistersPatchEmbeddings(config)
188
+ num_patches = self.patch_embeddings.num_patches
189
+ self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
190
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
191
+ self.patch_size = config.patch_size
192
+ self.config = config
193
+
194
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
195
+ """
196
+ This method allows interpolating the pre-trained position encodings, to be able to use the model on higher
197
+ resolution images. This implementation supports torch.jit tracing while maintaining backwards compatibility
198
+ with the original implementation.
199
+
200
+ Adapted from:
201
+ - https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
202
+ - https://github.com/facebookresearch/dinov2/blob/main/dinov2/models/vision_transformer.py
203
+ """
204
+ num_patches = embeddings.shape[1] - 1
205
+ num_positions = self.position_embeddings.shape[1] - 1
206
+
207
+ # Skip interpolation for matching dimensions (unless tracing)
208
+ if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
209
+ return self.position_embeddings
210
+
211
+ # Handle class token and patch embeddings separately
212
+ class_pos_embed = self.position_embeddings[:, 0]
213
+ patch_pos_embed = self.position_embeddings[:, 1:]
214
+ dim = embeddings.shape[-1]
215
+
216
+ # Calculate new dimensions
217
+ height = height // self.config.patch_size
218
+ width = width // self.config.patch_size
219
+
220
+ # Reshape for interpolation
221
+ sqrt_num_positions = torch_int(num_positions**0.5)
222
+ patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
223
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
224
+
225
+ # Store original dtype for restoration after interpolation
226
+ target_dtype = patch_pos_embed.dtype
227
+
228
+ # Interpolate at float32 precision
229
+ patch_pos_embed = nn.functional.interpolate(
230
+ patch_pos_embed.to(dtype=torch.float32),
231
+ size=(torch_int(height), torch_int(width)), # Explicit size instead of scale_factor
232
+ mode="bicubic",
233
+ align_corners=False,
234
+ antialias=True,
235
+ ).to(dtype=target_dtype)
236
+
237
+ # Validate output dimensions if not tracing
238
+ if not torch.jit.is_tracing():
239
+ if int(height) != patch_pos_embed.shape[-2] or int(width) != patch_pos_embed.shape[-1]:
240
+ raise ValueError("Width or height does not match with the interpolated position embeddings")
241
+
242
+ # Reshape back to original format
243
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
244
+
245
+ # Combine class and patch embeddings
246
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
247
+
248
+ def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
249
+ batch_size, _, height, width = pixel_values.shape
250
+ target_dtype = self.patch_embeddings.projection.weight.dtype
251
+ embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
252
+
253
+ if bool_masked_pos is not None:
254
+ embeddings = torch.where(
255
+ bool_masked_pos.unsqueeze(-1), self.mask_token.to(embeddings.dtype).unsqueeze(0), embeddings
256
+ )
257
+
258
+ # add the [CLS] token to the embedded patch tokens
259
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
260
+ embeddings = torch.cat((cls_tokens, embeddings), dim=1)
261
+
262
+ # add positional encoding to each token
263
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
264
+
265
+ # add register tokens
266
+ embeddings = torch.cat(
267
+ (embeddings[:, :1], self.register_tokens.expand(embeddings.shape[0], -1, -1), embeddings[:, 1:]), dim=1
268
+ )
269
+
270
+ embeddings = self.dropout(embeddings)
271
+
272
+ return embeddings
273
+
274
+
275
+ class Dinov2WithRegistersEncoder(Dinov2Encoder):
276
+ pass
277
+
278
+
279
+ class Dinov2WithRegistersPreTrainedModel(Dinov2PreTrainedModel):
280
+ pass
281
+
282
+
283
+ class Dinov2WithRegistersModel(Dinov2Model):
284
+ pass
285
+
286
+
287
+ class Dinov2WithRegistersForImageClassification(Dinov2ForImageClassification):
288
+ pass
289
+
290
+
291
+ class Dinov2WithRegistersBackbone(Dinov2Backbone):
292
+ def __init__(self, config):
293
+ super().__init__(config)
294
+ super()._init_backbone(config)
295
+
296
+ self.num_register_tokens = config.num_register_tokens
297
+ self.num_features = [config.hidden_size for _ in range(config.num_hidden_layers + 1)]
298
+ self.embeddings = Dinov2WithRegistersEmbeddings(config)
299
+ self.encoder = Dinov2WithRegistersEncoder(config)
300
+
301
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
302
+
303
+ # Initialize weights and apply final processing
304
+ self.post_init()
305
+
306
+ def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
307
+ return self.embeddings.patch_embeddings
308
+
309
+ def forward(
310
+ self,
311
+ pixel_values: torch.Tensor,
312
+ output_hidden_states: Optional[bool] = None,
313
+ output_attentions: Optional[bool] = None,
314
+ return_dict: Optional[bool] = None,
315
+ ) -> BackboneOutput:
316
+ """
317
+ Returns:
318
+
319
+ Examples:
320
+
321
+ ```python
322
+ >>> from transformers import AutoImageProcessor, AutoBackbone
323
+ >>> import torch
324
+ >>> from PIL import Image
325
+ >>> import requests
326
+
327
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
328
+ >>> image = Image.open(requests.get(url, stream=True).raw)
329
+
330
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
331
+ >>> model = AutoBackbone.from_pretrained(
332
+ ... "facebook/dinov2-with-registers-base", out_features=["stage2", "stage5", "stage8", "stage11"]
333
+ ... )
334
+
335
+ >>> inputs = processor(image, return_tensors="pt")
336
+
337
+ >>> outputs = model(**inputs)
338
+ >>> feature_maps = outputs.feature_maps
339
+ >>> list(feature_maps[-1].shape)
340
+ [1, 768, 16, 16]
341
+ ```"""
342
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
343
+ output_hidden_states = (
344
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
345
+ )
346
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
347
+
348
+ embedding_output = self.embeddings(pixel_values)
349
+
350
+ outputs = self.encoder(
351
+ embedding_output, output_hidden_states=True, output_attentions=output_attentions, return_dict=return_dict
352
+ )
353
+
354
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
355
+
356
+ feature_maps = ()
357
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
358
+ if stage in self.out_features:
359
+ if self.config.apply_layernorm:
360
+ hidden_state = self.layernorm(hidden_state)
361
+ if self.config.reshape_hidden_states:
362
+ hidden_state = hidden_state[:, self.num_register_tokens + 1 :]
363
+ # this was actually a bug in the original implementation that we copied here,
364
+ # because normally the order is height, width
365
+ batch_size, _, height, width = pixel_values.shape
366
+ patch_size = self.config.patch_size
367
+ hidden_state = hidden_state.reshape(batch_size, height // patch_size, width // patch_size, -1)
368
+ hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
369
+ feature_maps += (hidden_state,)
370
+
371
+ if not return_dict:
372
+ if output_hidden_states:
373
+ output = (feature_maps,) + outputs[1:]
374
+ else:
375
+ output = (feature_maps,) + outputs[2:]
376
+ return output
377
+
378
+ return BackboneOutput(
379
+ feature_maps=feature_maps,
380
+ hidden_states=outputs.hidden_states if output_hidden_states else None,
381
+ attentions=outputs.attentions if output_attentions else None,
382
+ )
383
+
384
+
385
+ __all__ = [
386
+ "Dinov2WithRegistersConfig",
387
+ "Dinov2WithRegistersPreTrainedModel",
388
+ "Dinov2WithRegistersModel",
389
+ "Dinov2WithRegistersForImageClassification",
390
+ "Dinov2WithRegistersBackbone",
391
+ ]
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/configuration_instructblipvideo.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/image_processing_instructblipvideo.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modeling_instructblipvideo.cpython-310.pyc ADDED
Binary file (51.8 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/modular_instructblipvideo.cpython-310.pyc ADDED
Binary file (15 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/__pycache__/processing_instructblipvideo.cpython-310.pyc ADDED
Binary file (7.36 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/image_processing_instructblipvideo.py ADDED
@@ -0,0 +1,348 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """
17
+ Image processor class for InstructBLIPVideo. Largely a copy of Blip2Processor, with the addition of video processing abilities.
18
+ """
19
+
20
+ from typing import Dict, List, Optional, Union
21
+
22
+ import numpy as np
23
+
24
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
25
+ from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
26
+ from ...image_utils import (
27
+ OPENAI_CLIP_MEAN,
28
+ OPENAI_CLIP_STD,
29
+ ChannelDimension,
30
+ ImageInput,
31
+ PILImageResampling,
32
+ VideoInput,
33
+ infer_channel_dimension_format,
34
+ is_scaled_image,
35
+ is_valid_image,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
41
+
42
+
43
+ if is_vision_available():
44
+ import PIL
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+
50
+ def make_batched_videos(videos) -> List[VideoInput]:
51
+ if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
52
+ return videos
53
+
54
+ elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
55
+ if isinstance(videos[0], PIL.Image.Image):
56
+ return [videos]
57
+ elif len(videos[0].shape) == 4:
58
+ return [list(video) for video in videos]
59
+
60
+ elif is_valid_image(videos):
61
+ if isinstance(videos, PIL.Image.Image):
62
+ return [[videos]]
63
+ elif len(videos.shape) == 4:
64
+ return [list(videos)]
65
+
66
+ raise ValueError(f"Could not make batched video from {videos}")
67
+
68
+
69
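`make_batched_videos` normalises every accepted input into a list of videos, each video being a list of frames. A short sketch with NumPy arrays only (the import path simply mirrors the file shown here and assumes it is importable from an installed `transformers`):

```python
import numpy as np

from transformers.models.instructblipvideo.image_processing_instructblipvideo import make_batched_videos

single_video = np.zeros((8, 224, 224, 3), dtype=np.uint8)   # one video: 8 frames, HWC
batched = make_batched_videos(single_video)
print(len(batched), len(batched[0]))  # 1 video, 8 frames

two_videos = [np.zeros((4, 224, 224, 3), dtype=np.uint8)] * 2
batched = make_batched_videos(two_videos)
print(len(batched), len(batched[0]))  # 2 videos, 4 frames each
```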
+ # Copied from transformers.models.blip.image_processing_blip.BlipImageProcessor with Blip->InstructBlipVideo, BLIP->InstructBLIPVideo
70
+ class InstructBlipVideoImageProcessor(BaseImageProcessor):
71
+ r"""
72
+ Constructs a InstructBLIPVideo image processor.
73
+
74
+ Args:
75
+ do_resize (`bool`, *optional*, defaults to `True`):
76
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
77
+ `do_resize` parameter in the `preprocess` method.
78
+ size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
79
+ Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
80
+ method.
81
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
82
+ Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
83
+ overridden by the `resample` parameter in the `preprocess` method.
84
+ do_rescale (`bool`, *optional*, defaults to `True`):
85
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
86
+ `do_rescale` parameter in the `preprocess` method.
87
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
88
+ Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
89
+ overridden by the `rescale_factor` parameter in the `preprocess` method.
90
+ do_normalize (`bool`, *optional*, defaults to `True`):
91
+ Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
92
+ method.
93
+ image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
94
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
95
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess`
96
+ method.
97
+ image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
98
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
99
+ number of channels in the image. Can be overridden by the `image_std` parameter in the
100
+ `preprocess` method.
101
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
102
+ Whether to convert the image to RGB.
103
+ """
104
+
105
+ model_input_names = ["pixel_values"]
106
+
107
+ def __init__(
108
+ self,
109
+ do_resize: bool = True,
110
+ size: Dict[str, int] = None,
111
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
112
+ do_rescale: bool = True,
113
+ rescale_factor: Union[int, float] = 1 / 255,
114
+ do_normalize: bool = True,
115
+ image_mean: Optional[Union[float, List[float]]] = None,
116
+ image_std: Optional[Union[float, List[float]]] = None,
117
+ do_convert_rgb: bool = True,
118
+ **kwargs,
119
+ ) -> None:
120
+ super().__init__(**kwargs)
121
+ size = size if size is not None else {"height": 384, "width": 384}
122
+ size = get_size_dict(size, default_to_square=True)
123
+
124
+ self.do_resize = do_resize
125
+ self.size = size
126
+ self.resample = resample
127
+ self.do_rescale = do_rescale
128
+ self.rescale_factor = rescale_factor
129
+ self.do_normalize = do_normalize
130
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
131
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
132
+ self.do_convert_rgb = do_convert_rgb
133
+
134
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
135
+ def resize(
136
+ self,
137
+ image: np.ndarray,
138
+ size: Dict[str, int],
139
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
140
+ data_format: Optional[Union[str, ChannelDimension]] = None,
141
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
142
+ **kwargs,
143
+ ) -> np.ndarray:
144
+ """
145
+ Resize an image to `(size["height"], size["width"])`.
146
+
147
+ Args:
148
+ image (`np.ndarray`):
149
+ Image to resize.
150
+ size (`Dict[str, int]`):
151
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
152
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
153
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
154
+ data_format (`ChannelDimension` or `str`, *optional*):
155
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
156
+ image is used. Can be one of:
157
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
158
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
159
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
160
+ input_data_format (`ChannelDimension` or `str`, *optional*):
161
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
162
+ from the input image. Can be one of:
163
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
164
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
165
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
166
+
167
+ Returns:
168
+ `np.ndarray`: The resized image.
169
+ """
170
+ size = get_size_dict(size)
171
+ if "height" not in size or "width" not in size:
172
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
173
+
174
+ output_size = (size["height"], size["width"])
175
+ return resize(
176
+ image,
177
+ size=output_size,
178
+ resample=resample,
179
+ data_format=data_format,
180
+ input_data_format=input_data_format,
181
+ **kwargs,
182
+ )
183
+
184
+ # Ignore copy
185
+ @filter_out_non_signature_kwargs()
186
+ def preprocess(
187
+ self,
188
+ images: VideoInput = None,
189
+ do_resize: Optional[bool] = None,
190
+ size: Optional[Dict[str, int]] = None,
191
+ resample: PILImageResampling = None,
192
+ do_rescale: Optional[bool] = None,
193
+ rescale_factor: Optional[float] = None,
194
+ do_normalize: Optional[bool] = None,
195
+ image_mean: Optional[Union[float, List[float]]] = None,
196
+ image_std: Optional[Union[float, List[float]]] = None,
197
+ return_tensors: Optional[Union[str, TensorType]] = None,
198
+ do_convert_rgb: bool = None,
199
+ data_format: ChannelDimension = ChannelDimension.FIRST,
200
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
201
+ ) -> BatchFeature:
202
+ """
203
+ Preprocess a video or batch of images/videos.
204
+
205
+ Args:
206
+ images (`VideoInput`):
207
+ Video frames to preprocess. Expects a single or batch of videos as a list of frames with pixel values
208
+ ranging from 0 to 255. If passing in videos with pixel values between 0 and 1, set `do_rescale=False`.
209
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
210
+ Whether to resize the video.
211
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
212
+ Controls the size of each frame after `resize`. Frames are resized to
213
+ `(size["height"], size["width"])` without preserving the aspect ratio; the `resize` method
214
+ above expects a size dictionary containing both `height` and `width` keys and raises a
215
+ `ValueError` otherwise.
216
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
217
+ Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`.
218
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
219
+ Whether to rescale the video values to the [0, 1] range.
220
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
221
+ Rescale factor to rescale the video by if `do_rescale` is set to `True`.
222
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
223
+ Whether to normalize the video.
224
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
225
+ Image mean to normalize the video by if `do_normalize` is set to `True`.
226
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
227
+ Image standard deviation to normalize the video by if `do_normalize` is set to `True`.
228
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
229
+ Whether to convert the image to RGB.
230
+ return_tensors (`str` or `TensorType`, *optional*):
231
+ The type of tensors to return. Can be one of:
232
+ - Unset: Return a list of `np.ndarray`.
233
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
234
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
235
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
236
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
237
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
238
+ The channel dimension format for the output image. Can be one of:
239
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
240
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
241
+ - Unset: Use the channel dimension format of the input image.
242
+ input_data_format (`ChannelDimension` or `str`, *optional*):
243
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
244
+ from the input image. Can be one of:
245
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
246
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
247
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
248
+ """
249
+ do_resize = do_resize if do_resize is not None else self.do_resize
250
+ resample = resample if resample is not None else self.resample
251
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
252
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
253
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
254
+ image_mean = image_mean if image_mean is not None else self.image_mean
255
+ image_std = image_std if image_std is not None else self.image_std
256
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
257
+
258
+ size = size if size is not None else self.size
259
+ size = get_size_dict(size, default_to_square=False)
260
+
261
+ videos = make_batched_videos(images)
262
+
263
+ validate_preprocess_arguments(
264
+ do_rescale=do_rescale,
265
+ rescale_factor=rescale_factor,
266
+ do_normalize=do_normalize,
267
+ image_mean=image_mean,
268
+ image_std=image_std,
269
+ do_resize=do_resize,
270
+ size=size,
271
+ resample=resample,
272
+ )
273
+
274
+ if not valid_images(videos):
275
+ raise ValueError(
276
+ "Invalid input type. Must be of type PIL.Image.Image, numpy.ndarray, "
277
+ "torch.Tensor, tf.Tensor or jax.ndarray."
278
+ )
279
+
280
+ pixel_values = [
281
+ [
282
+ self._preprocess_image(
283
+ image=frame,
284
+ do_resize=do_resize,
285
+ size=size,
286
+ resample=resample,
287
+ do_rescale=do_rescale,
288
+ rescale_factor=rescale_factor,
289
+ do_normalize=do_normalize,
290
+ image_mean=image_mean,
291
+ image_std=image_std,
292
+ do_convert_rgb=do_convert_rgb,
293
+ data_format=data_format,
294
+ input_data_format=input_data_format,
295
+ )
296
+ for frame in video
297
+ ]
298
+ for video in videos
299
+ ]
300
+
301
+ encoded_outputs = BatchFeature(data={"pixel_values": pixel_values}, tensor_type=return_tensors)
302
+ return encoded_outputs
303
+
304
+ # Ignore copy
305
+ def _preprocess_image(
306
+ self,
307
+ image: ImageInput = None,
308
+ do_resize: Optional[bool] = None,
309
+ size: Optional[Dict[str, int]] = None,
310
+ resample: PILImageResampling = None,
311
+ do_rescale: Optional[bool] = None,
312
+ rescale_factor: Optional[float] = None,
313
+ do_normalize: Optional[bool] = None,
314
+ image_mean: Optional[Union[float, List[float]]] = None,
315
+ image_std: Optional[Union[float, List[float]]] = None,
316
+ do_convert_rgb: bool = None,
317
+ data_format: ChannelDimension = ChannelDimension.FIRST,
318
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
319
+ ) -> np.ndarray:
320
+ # PIL RGBA images are converted to RGB
321
+ if do_convert_rgb:
322
+ image = convert_to_rgb(image)
323
+
324
+ # All transformations expect numpy arrays.
325
+ image = to_numpy_array(image)
326
+
327
+ if do_rescale and is_scaled_image(image):
328
+ logger.warning_once(
329
+ "It looks like you are trying to rescale already rescaled video frames. If the input"
330
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
331
+ )
332
+
333
+ if input_data_format is None:
334
+ # We assume that all images have the same channel dimension format.
335
+ input_data_format = infer_channel_dimension_format(image)
336
+
337
+ if do_resize:
338
+ image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
339
+
340
+ if do_rescale:
341
+ image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
342
+
343
+ if do_normalize:
344
+ image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
345
+
346
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
347
+
348
+ return image
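A minimal usage sketch for the InstructBlipVideoImageProcessor defined above; the dummy 4-frame clip, its 224x224 resolution and the printed shape are illustrative assumptions, not part of the file:

import numpy as np
from transformers import InstructBlipVideoImageProcessor

processor = InstructBlipVideoImageProcessor()  # defaults: 384x384 bicubic resize, OPENAI_CLIP mean/std

# A fake 4-frame RGB clip with uint8 pixel values in [0, 255], shaped (frames, height, width, channels),
# which make_batched_videos wraps into a single-video batch.
video = np.random.randint(0, 256, (4, 224, 224, 3), dtype=np.uint8)

inputs = processor(images=video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 4, 3, 384, 384): batch, frames, channels, height, width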
janus/lib/python3.10/site-packages/transformers/models/instructblipvideo/processing_instructblipvideo.py ADDED
@@ -0,0 +1,236 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Processor class for InstructBLIPVideo. Largely a copy of Blip2Processor, with the addition of a tokenizer for the Q-Former.
17
+ """
18
+
19
+ import os
20
+ from typing import List, Optional, Union
21
+
22
+ from ...image_processing_utils import BatchFeature
23
+ from ...image_utils import VideoInput
24
+ from ...processing_utils import ProcessorMixin
25
+ from ...tokenization_utils_base import (
26
+ AddedToken,
27
+ BatchEncoding,
28
+ PaddingStrategy,
29
+ PreTokenizedInput,
30
+ TextInput,
31
+ TruncationStrategy,
32
+ )
33
+ from ...utils import TensorType, logging
34
+ from ..auto import AutoTokenizer
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+
40
+ class InstructBlipVideoProcessor(ProcessorMixin):
41
+ r"""
42
+ Constructs an InstructBLIPVideo processor which wraps an InstructBlipVideo image processor and a LLaMa/T5 tokenizer into a single
43
+ processor.
44
+
45
+ [`InstructBlipVideoProcessor`] offers all the functionalities of [`InstructBlipVideoImageProcessor`] and [`AutoTokenizer`]. See the
46
+ docstring of [`~InstructBlipVideoProcessor.__call__`] and [`~InstructBlipVideoProcessor.decode`] for more information.
47
+
48
+ Args:
49
+ image_processor (`InstructBlipVideoImageProcessor`):
50
+ An instance of [`InstructBlipVideoImageProcessor`]. The image processor is a required input.
51
+ tokenizer (`AutoTokenizer`):
52
+ An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
53
+ qformer_tokenizer (`AutoTokenizer`):
54
+ An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
55
+ num_query_tokens (`int`, *optional*):
56
+ Number of tokens used by the Q-Former as queries; should be the same as in the model's config.
57
+ """
58
+
59
+ attributes = ["image_processor", "tokenizer", "qformer_tokenizer"]
60
+ valid_kwargs = ["num_query_tokens"]
61
+ image_processor_class = "InstructBlipVideoImageProcessor"
62
+ tokenizer_class = "AutoTokenizer"
63
+ qformer_tokenizer_class = "AutoTokenizer"
64
+
65
+ def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
66
+ if not hasattr(tokenizer, "video_token"):
67
+ self.video_token = AddedToken("<video>", normalized=False, special=True)
68
+ tokenizer.add_tokens([self.video_token], special_tokens=True)
69
+ else:
70
+ self.video_token = tokenizer.video_token
71
+ self.num_query_tokens = num_query_tokens
72
+ super().__init__(image_processor, tokenizer, qformer_tokenizer)
73
+
74
+ def __call__(
75
+ self,
76
+ images: VideoInput = None,
77
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
78
+ add_special_tokens: bool = True,
79
+ padding: Union[bool, str, PaddingStrategy] = False,
80
+ truncation: Union[bool, str, TruncationStrategy] = None,
81
+ max_length: Optional[int] = None,
82
+ stride: int = 0,
83
+ pad_to_multiple_of: Optional[int] = None,
84
+ return_attention_mask: Optional[bool] = None,
85
+ return_overflowing_tokens: bool = False,
86
+ return_special_tokens_mask: bool = False,
87
+ return_offsets_mapping: bool = False,
88
+ return_token_type_ids: bool = False,
89
+ return_length: bool = False,
90
+ verbose: bool = True,
91
+ return_tensors: Optional[Union[str, TensorType]] = None,
92
+ **kwargs,
93
+ ) -> BatchFeature:
94
+ """
95
+ This method uses [`InstructBlipVideoImageProcessor.__call__`] to prepare image(s) or video(s) for the model, and
96
+ [`BertTokenizerFast.__call__`] to prepare text for the model.
97
+
98
+ Please refer to the docstring of the above two methods for more information.
99
+ """
100
+ if images is None and text is None:
101
+ raise ValueError("You have to specify at least one of images or text.")
102
+
103
+ encoding = BatchFeature()
104
+
105
+ if text is not None:
106
+ if isinstance(text, str):
107
+ text = [text]
108
+ elif not isinstance(text, list) and not isinstance(text[0], str):
109
+ raise ValueError("Invalid input text. Please provide a string, or a list of strings")
110
+
111
+ _text_encoding = self.tokenizer(
112
+ text=text,
113
+ add_special_tokens=add_special_tokens,
114
+ padding=padding,
115
+ truncation=truncation,
116
+ max_length=max_length,
117
+ stride=stride,
118
+ pad_to_multiple_of=pad_to_multiple_of,
119
+ return_attention_mask=return_attention_mask,
120
+ return_overflowing_tokens=return_overflowing_tokens,
121
+ return_special_tokens_mask=return_special_tokens_mask,
122
+ return_offsets_mapping=return_offsets_mapping,
123
+ return_token_type_ids=return_token_type_ids,
124
+ return_length=return_length,
125
+ verbose=verbose,
126
+ return_tensors=None, # required to concatenate below
127
+ **kwargs,
128
+ )
129
+
130
+ # if we know how many query tokens, expand text inside processor. We need this hacky manipulation
131
+ # because BLIP expects image tokens to be at the beginning even before BOS token
132
+ if self.num_query_tokens is not None and images is not None:
133
+ text_encoding = {}
134
+ video_tokens = (
135
+ self.video_token.content * self.num_query_tokens * 4
136
+ ) # InstructBlipVideo works with 4 frames only
137
+ video_token_encoding = self.tokenizer(
138
+ [video_tokens] * len(text), add_special_tokens=False, return_tensors=None
139
+ )
140
+ for k in _text_encoding:
141
+ text_encoding[k] = [
142
+ img_encoding + txt_encoding
143
+ for img_encoding, txt_encoding in zip(video_token_encoding[k], _text_encoding[k])
144
+ ]
145
+ else:
146
+ text_encoding = _text_encoding
147
+ if images is not None:
148
+ logger.warning_once(
149
+ "Expanding inputs for video tokens in InstructBLIPVideo should be done in processing. "
150
+ "Please follow instruction here (https://gist.github.com/zucchini-nlp/65f22892b054dc0d68228af56fbeaac2) to update your InstructBLIPVideo model. "
151
+ "Using processors without these attributes in the config is deprecated and will throw an error in v4.47."
152
+ )
153
+
154
+ # cast to desired return tensors type after concatenating
155
+ text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors)
156
+ encoding.update(text_encoding)
157
+ qformer_text_encoding = self.qformer_tokenizer(
158
+ text=text,
159
+ add_special_tokens=add_special_tokens,
160
+ padding=padding,
161
+ truncation=truncation,
162
+ max_length=max_length,
163
+ stride=stride,
164
+ pad_to_multiple_of=pad_to_multiple_of,
165
+ return_attention_mask=return_attention_mask,
166
+ return_overflowing_tokens=return_overflowing_tokens,
167
+ return_special_tokens_mask=return_special_tokens_mask,
168
+ return_offsets_mapping=return_offsets_mapping,
169
+ return_token_type_ids=return_token_type_ids,
170
+ return_length=return_length,
171
+ verbose=verbose,
172
+ return_tensors=return_tensors,
173
+ **kwargs,
174
+ )
175
+ encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
176
+ encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
177
+
178
+ if images is not None:
179
+ image_encoding = self.image_processor(images, return_tensors=return_tensors)
180
+ encoding.update(image_encoding)
181
+
182
+ return encoding
183
+
184
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer
185
+ def batch_decode(self, *args, **kwargs):
186
+ """
187
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
188
+ refer to the docstring of this method for more information.
189
+ """
190
+ return self.tokenizer.batch_decode(*args, **kwargs)
191
+
192
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer
193
+ def decode(self, *args, **kwargs):
194
+ """
195
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
196
+ the docstring of this method for more information.
197
+ """
198
+ return self.tokenizer.decode(*args, **kwargs)
199
+
200
+ @property
201
+ # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
202
+ def model_input_names(self):
203
+ tokenizer_input_names = self.tokenizer.model_input_names
204
+ image_processor_input_names = self.image_processor.model_input_names
205
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
206
+
207
+ # overwrite to save the Q-Former tokenizer in a separate folder
208
+ def save_pretrained(self, save_directory, **kwargs):
209
+ if os.path.isfile(save_directory):
210
+ raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
211
+ os.makedirs(save_directory, exist_ok=True)
212
+ qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
213
+ self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
214
+
215
+ # We modify the attributes so that only the tokenizer and image processor are saved in the main folder
216
+ qformer_present = "qformer_tokenizer" in self.attributes
217
+ if qformer_present:
218
+ self.attributes.remove("qformer_tokenizer")
219
+
220
+ outputs = super().save_pretrained(save_directory, **kwargs)
221
+
222
+ if qformer_present:
223
+ self.attributes += ["qformer_tokenizer"]
224
+ return outputs
225
+
226
+ # overwrite to load the Q-Former tokenizer from a separate folder
227
+ @classmethod
228
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
229
+ processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
230
+
231
+ # if return_unused_kwargs a tuple is returned where the second element is 'unused_kwargs'
232
+ if isinstance(processor, tuple):
233
+ processor = processor[0]
234
+ qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
235
+ processor.qformer_tokenizer = qformer_tokenizer
236
+ return processor
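For reference, the video-token expansion performed in `__call__` above amounts to prepending `num_query_tokens * 4` placeholder ids to every text encoding. A toy sketch with made-up ids (the real ids come from the tokenizers, and 32 query tokens is only an assumption):

num_query_tokens = 32                              # assumption: in practice this comes from the model config
video_token_ids = [999] * (num_query_tokens * 4)   # the "<video>" id repeated once per query token and frame
text_ids = [0, 1234, 5678, 2]                      # e.g. BOS ... EOS produced by the main tokenizer

input_ids = video_token_ids + text_ids             # video positions sit before BOS, as the model expects
print(len(input_ids))                              # 132 = 128 video placeholders + 4 text tokens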
janus/lib/python3.10/site-packages/transformers/models/lilt/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_lilt import *
22
+ from .modeling_lilt import *
23
+ else:
24
+ import sys
25
+
26
+ _file = globals()["__file__"]
27
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
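A small sketch of what the lazy wiring above provides: nothing heavy is imported until an attribute is first accessed (the attribute shown comes from the configuration file in this diff):

import transformers.models.lilt as lilt

# The attribute access below triggers the actual import of configuration_lilt via _LazyModule.
config_cls = lilt.LiltConfig
print(config_cls.model_type)  # "lilt"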
janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (531 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/lilt/configuration_lilt.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """LiLT configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class LiltConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`LiltModel`]. It is used to instantiate a LiLT
27
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
28
+ defaults will yield a similar configuration to that of the LiLT
29
+ [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) architecture.
30
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
31
+ documentation from [`PretrainedConfig`] for more information.
32
+
33
+ Args:
34
+ vocab_size (`int`, *optional*, defaults to 30522):
35
+ Vocabulary size of the LiLT model. Defines the number of different tokens that can be represented by the
36
+ `inputs_ids` passed when calling [`LiltModel`].
37
+ hidden_size (`int`, *optional*, defaults to 768):
38
+ Dimensionality of the encoder layers and the pooler layer. Should be a multiple of 24.
39
+ num_hidden_layers (`int`, *optional*, defaults to 12):
40
+ Number of hidden layers in the Transformer encoder.
41
+ num_attention_heads (`int`, *optional*, defaults to 12):
42
+ Number of attention heads for each attention layer in the Transformer encoder.
43
+ intermediate_size (`int`, *optional*, defaults to 3072):
44
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
45
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
46
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
47
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
48
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
49
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
50
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
51
+ The dropout ratio for the attention probabilities.
52
+ max_position_embeddings (`int`, *optional*, defaults to 512):
53
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
54
+ just in case (e.g., 512 or 1024 or 2048).
55
+ type_vocab_size (`int`, *optional*, defaults to 2):
56
+ The vocabulary size of the `token_type_ids` passed when calling [`LiltModel`].
57
+ initializer_range (`float`, *optional*, defaults to 0.02):
58
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
59
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
60
+ The epsilon used by the layer normalization layers.
61
+ position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
62
+ Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
63
+ positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
64
+ [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
65
+ For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
66
+ with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
67
+ classifier_dropout (`float`, *optional*):
68
+ The dropout ratio for the classification head.
69
+ channel_shrink_ratio (`int`, *optional*, defaults to 4):
70
+ The shrink ratio compared to the `hidden_size` for the channel dimension of the layout embeddings.
71
+ max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
72
+ The maximum value that the 2D position embedding might ever be used with. Typically set this to something
73
+ large just in case (e.g., 1024).
74
+
75
+ Examples:
76
+
77
+ ```python
78
+ >>> from transformers import LiltConfig, LiltModel
79
+
80
+ >>> # Initializing a LiLT SCUT-DLVCLab/lilt-roberta-en-base style configuration
81
+ >>> configuration = LiltConfig()
82
+ >>> # Randomly initializing a model from the SCUT-DLVCLab/lilt-roberta-en-base style configuration
83
+ >>> model = LiltModel(configuration)
84
+ >>> # Accessing the model configuration
85
+ >>> configuration = model.config
86
+ ```"""
87
+
88
+ model_type = "lilt"
89
+
90
+ def __init__(
91
+ self,
92
+ vocab_size=30522,
93
+ hidden_size=768,
94
+ num_hidden_layers=12,
95
+ num_attention_heads=12,
96
+ intermediate_size=3072,
97
+ hidden_act="gelu",
98
+ hidden_dropout_prob=0.1,
99
+ attention_probs_dropout_prob=0.1,
100
+ max_position_embeddings=512,
101
+ type_vocab_size=2,
102
+ initializer_range=0.02,
103
+ layer_norm_eps=1e-12,
104
+ pad_token_id=0,
105
+ position_embedding_type="absolute",
106
+ classifier_dropout=None,
107
+ channel_shrink_ratio=4,
108
+ max_2d_position_embeddings=1024,
109
+ **kwargs,
110
+ ):
111
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
112
+
113
+ self.vocab_size = vocab_size
114
+ self.hidden_size = hidden_size
115
+ self.num_hidden_layers = num_hidden_layers
116
+ self.num_attention_heads = num_attention_heads
117
+ self.hidden_act = hidden_act
118
+ self.intermediate_size = intermediate_size
119
+ self.hidden_dropout_prob = hidden_dropout_prob
120
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
121
+ self.max_position_embeddings = max_position_embeddings
122
+ self.type_vocab_size = type_vocab_size
123
+ self.initializer_range = initializer_range
124
+ self.layer_norm_eps = layer_norm_eps
125
+ self.position_embedding_type = position_embedding_type
126
+ self.classifier_dropout = classifier_dropout
127
+ self.channel_shrink_ratio = channel_shrink_ratio
128
+ self.max_2d_position_embeddings = max_2d_position_embeddings
129
+
130
+
131
+ __all__ = ["LiltConfig"]
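As a quick check of the divisibility this configuration relies on (the layout stream in modeling_lilt.py splits `hidden_size` by 6 for the box coordinates and by `channel_shrink_ratio` for the layout hidden states, which is part of why the docstring asks for a multiple of 24), a small sketch with the default values:

hidden_size = 768
channel_shrink_ratio = 4

per_coordinate = hidden_size // 6                      # each of the six box embeddings is this wide
layout_width = hidden_size // channel_shrink_ratio     # width of the layout hidden states

assert per_coordinate * 6 == hidden_size               # the six concatenated pieces recover hidden_size
assert layout_width * channel_shrink_ratio == hidden_size
print(per_coordinate, layout_width)                    # 128 192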
janus/lib/python3.10/site-packages/transformers/models/lilt/modeling_lilt.py ADDED
@@ -0,0 +1,1192 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch LiLT model."""
16
+
17
+ import math
18
+ from typing import Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import (
27
+ BaseModelOutput,
28
+ BaseModelOutputWithPooling,
29
+ QuestionAnsweringModelOutput,
30
+ SequenceClassifierOutput,
31
+ TokenClassifierOutput,
32
+ )
33
+ from ...modeling_utils import PreTrainedModel
34
+ from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
35
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
36
+ from .configuration_lilt import LiltConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ _CONFIG_FOR_DOC = "LiltConfig"
42
+
43
+
44
+ class LiltTextEmbeddings(nn.Module):
45
+ def __init__(self, config):
46
+ super().__init__()
47
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
48
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
49
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
50
+
51
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
52
+ # any TensorFlow checkpoint file
53
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
54
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
55
+
56
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
57
+ self.register_buffer(
58
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
59
+ )
60
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
61
+
62
+ # End copy
63
+ self.padding_idx = config.pad_token_id
64
+ self.position_embeddings = nn.Embedding(
65
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
66
+ )
67
+
68
+ def forward(
69
+ self,
70
+ input_ids=None,
71
+ token_type_ids=None,
72
+ position_ids=None,
73
+ inputs_embeds=None,
74
+ ):
75
+ if position_ids is None:
76
+ if input_ids is not None:
77
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
78
+ position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
79
+ input_ids.device
80
+ )
81
+ else:
82
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
83
+
84
+ if input_ids is not None:
85
+ input_shape = input_ids.size()
86
+ else:
87
+ input_shape = inputs_embeds.size()[:-1]
88
+
89
+ if token_type_ids is None:
90
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
91
+
92
+ if inputs_embeds is None:
93
+ inputs_embeds = self.word_embeddings(input_ids)
94
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
95
+
96
+ embeddings = inputs_embeds + token_type_embeddings
97
+ if self.position_embedding_type == "absolute":
98
+ position_embeddings = self.position_embeddings(position_ids)
99
+ embeddings += position_embeddings
100
+ embeddings = self.LayerNorm(embeddings)
101
+ embeddings = self.dropout(embeddings)
102
+ return embeddings, position_ids
103
+
104
+ def create_position_ids_from_input_ids(self, input_ids, padding_idx):
105
+ """
106
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1.
107
+ Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`.
108
+
109
+ Args: input_ids (`torch.Tensor`), padding_idx (`int`)
110
+ Returns: `torch.Tensor` of position ids with the same shape as `input_ids`.
111
+ """
112
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
113
+ mask = input_ids.ne(padding_idx).int()
114
+ incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
115
+ return incremental_indices.long() + padding_idx
116
+
117
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
118
+ """
119
+ We are provided embeddings directly. We cannot infer which tokens are padded, so we just
120
+ generate sequential position ids.
121
+ Args: inputs_embeds (`torch.Tensor`)
122
+ Returns: `torch.Tensor` of sequential position ids.
123
+ """
124
+ input_shape = inputs_embeds.size()[:-1]
125
+ sequence_length = input_shape[1]
126
+
127
+ position_ids = torch.arange(
128
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
129
+ )
130
+ return position_ids.unsqueeze(0).expand(input_shape)
131
+
132
+
133
+ class LiltLayoutEmbeddings(nn.Module):
134
+ def __init__(self, config):
135
+ super().__init__()
136
+ # we divide the hidden_size by 6 here as there are 6 different layout embeddings,
137
+ # namely left_position, upper_position, right_position, lower_position, height, width
138
+ self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
139
+ self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
140
+ self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
141
+ self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6)
142
+
143
+ self.padding_idx = config.pad_token_id
144
+ self.box_position_embeddings = nn.Embedding(
145
+ config.max_position_embeddings,
146
+ config.hidden_size // config.channel_shrink_ratio,
147
+ padding_idx=self.padding_idx,
148
+ )
149
+ self.box_linear_embeddings = nn.Linear(
150
+ in_features=config.hidden_size, out_features=config.hidden_size // config.channel_shrink_ratio
151
+ )
152
+ self.LayerNorm = nn.LayerNorm(config.hidden_size // config.channel_shrink_ratio, eps=config.layer_norm_eps)
153
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
154
+
155
+ def forward(self, bbox=None, position_ids=None):
156
+ try:
157
+ left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
158
+ upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
159
+ right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
160
+ lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
161
+ except IndexError as e:
162
+ raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
163
+
164
+ h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
165
+ w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
166
+
167
+ spatial_position_embeddings = torch.cat(
168
+ [
169
+ left_position_embeddings,
170
+ upper_position_embeddings,
171
+ right_position_embeddings,
172
+ lower_position_embeddings,
173
+ h_position_embeddings,
174
+ w_position_embeddings,
175
+ ],
176
+ dim=-1,
177
+ )
178
+ spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings)
179
+ box_position_embeddings = self.box_position_embeddings(position_ids)
180
+
181
+ spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings
182
+
183
+ spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings)
184
+ spatial_position_embeddings = self.dropout(spatial_position_embeddings)
185
+
186
+ return spatial_position_embeddings
187
+
188
+
189
+ class LiltSelfAttention(nn.Module):
190
+ def __init__(self, config, position_embedding_type=None):
191
+ super().__init__()
192
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
193
+ raise ValueError(
194
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
195
+ f"heads ({config.num_attention_heads})"
196
+ )
197
+
198
+ self.num_attention_heads = config.num_attention_heads
199
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
200
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
201
+
202
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
203
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
204
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
205
+
206
+ self.layout_query = nn.Linear(
207
+ config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
208
+ )
209
+ self.layout_key = nn.Linear(
210
+ config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
211
+ )
212
+ self.layout_value = nn.Linear(
213
+ config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio
214
+ )
215
+
216
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
217
+ self.position_embedding_type = position_embedding_type or getattr(
218
+ config, "position_embedding_type", "absolute"
219
+ )
220
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
221
+ self.max_position_embeddings = config.max_position_embeddings
222
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
223
+
224
+ self.channel_shrink_ratio = config.channel_shrink_ratio
225
+
226
+ def transpose_for_scores(self, x, r=1):
227
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size // r)
228
+ x = x.view(*new_x_shape)
229
+ return x.permute(0, 2, 1, 3)
230
+
231
+ def forward(
232
+ self,
233
+ hidden_states,
234
+ layout_inputs,
235
+ attention_mask=None,
236
+ head_mask=None,
237
+ output_attentions=False,
238
+ ):
239
+ layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio)
240
+ layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio)
241
+ layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio)
242
+
243
+ mixed_query_layer = self.query(hidden_states)
244
+
245
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
246
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
247
+ query_layer = self.transpose_for_scores(mixed_query_layer)
248
+
249
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
250
+ layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2))
251
+
252
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
253
+ seq_length = hidden_states.size()[1]
254
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
255
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
256
+ distance = position_ids_l - position_ids_r
257
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
258
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
259
+
260
+ if self.position_embedding_type == "relative_key":
261
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
262
+ attention_scores = attention_scores + relative_position_scores
263
+ elif self.position_embedding_type == "relative_key_query":
264
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
265
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
266
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
267
+
268
+ tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size)
269
+ tmp_layout_attention_scores = layout_attention_scores / math.sqrt(
270
+ self.attention_head_size // self.channel_shrink_ratio
271
+ )
272
+ attention_scores = tmp_attention_scores + tmp_layout_attention_scores
273
+ layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores
274
+
275
+ if attention_mask is not None:
276
+ # Apply the attention mask (precomputed for all layers in BertModel forward() function)
277
+ layout_attention_scores = layout_attention_scores + attention_mask
278
+
279
+ # Normalize the attention scores to probabilities.
280
+ layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores)
281
+
282
+ # This is actually dropping out entire tokens to attend to, which might
283
+ # seem a bit unusual, but is taken from the original Transformer paper.
284
+ layout_attention_probs = self.dropout(layout_attention_probs)
285
+
286
+ # Mask heads if we want to
287
+ if head_mask is not None:
288
+ layout_attention_probs = layout_attention_probs * head_mask
289
+
290
+ layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer)
291
+
292
+ layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous()
293
+ new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size // self.channel_shrink_ratio,)
294
+ layout_context_layer = layout_context_layer.view(*new_context_layer_shape)
295
+
296
+ if attention_mask is not None:
297
+ # Apply the attention mask (precomputed for all layers in RobertaModel forward() function)
298
+ attention_scores = attention_scores + attention_mask
299
+
300
+ # Normalize the attention scores to probabilities.
301
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
302
+
303
+ # This is actually dropping out entire tokens to attend to, which might
304
+ # seem a bit unusual, but is taken from the original Transformer paper.
305
+ attention_probs = self.dropout(attention_probs)
306
+
307
+ # Mask heads if we want to
308
+ if head_mask is not None:
309
+ attention_probs = attention_probs * head_mask
310
+
311
+ context_layer = torch.matmul(attention_probs, value_layer)
312
+
313
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
314
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
315
+ context_layer = context_layer.view(*new_context_layer_shape)
316
+
317
+ outputs = (
318
+ ((context_layer, layout_context_layer), attention_probs)
319
+ if output_attentions
320
+ else ((context_layer, layout_context_layer),)
321
+ )
322
+
323
+ return outputs
324
+
325
+
326
+ # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
327
+ class LiltSelfOutput(nn.Module):
328
+ def __init__(self, config):
329
+ super().__init__()
330
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
331
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
332
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
333
+
334
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
335
+ hidden_states = self.dense(hidden_states)
336
+ hidden_states = self.dropout(hidden_states)
337
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
338
+ return hidden_states
339
+
340
+
341
+ class LiltAttention(nn.Module):
342
+ def __init__(self, config, position_embedding_type=None):
343
+ super().__init__()
344
+ self.self = LiltSelfAttention(config, position_embedding_type=position_embedding_type)
345
+ self.output = LiltSelfOutput(config)
346
+ self.pruned_heads = set()
347
+
348
+ ori_hidden_size = config.hidden_size
349
+ config.hidden_size = config.hidden_size // config.channel_shrink_ratio
350
+ self.layout_output = LiltSelfOutput(config)
351
+ config.hidden_size = ori_hidden_size
352
+
353
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
354
+ def prune_heads(self, heads):
355
+ if len(heads) == 0:
356
+ return
357
+ heads, index = find_pruneable_heads_and_indices(
358
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
359
+ )
360
+
361
+ # Prune linear layers
362
+ self.self.query = prune_linear_layer(self.self.query, index)
363
+ self.self.key = prune_linear_layer(self.self.key, index)
364
+ self.self.value = prune_linear_layer(self.self.value, index)
365
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
366
+
367
+ # Update hyper params and store pruned heads
368
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
369
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
370
+ self.pruned_heads = self.pruned_heads.union(heads)
371
+
372
+ def forward(
373
+ self,
374
+ hidden_states: torch.Tensor,
375
+ layout_inputs: torch.Tensor,
376
+ attention_mask: Optional[torch.FloatTensor] = None,
377
+ head_mask: Optional[torch.FloatTensor] = None,
378
+ output_attentions: Optional[bool] = False,
379
+ ) -> Tuple[torch.Tensor]:
380
+ self_outputs = self.self(
381
+ hidden_states,
382
+ layout_inputs,
383
+ attention_mask,
384
+ head_mask,
385
+ output_attentions,
386
+ )
387
+ attention_output = self.output(self_outputs[0][0], hidden_states)
388
+ layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs)
389
+ outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them
390
+ return outputs
391
+
392
+
393
+ # Copied from transformers.models.bert.modeling_bert.BertIntermediate
394
+ class LiltIntermediate(nn.Module):
395
+ def __init__(self, config):
396
+ super().__init__()
397
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
398
+ if isinstance(config.hidden_act, str):
399
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
400
+ else:
401
+ self.intermediate_act_fn = config.hidden_act
402
+
403
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
404
+ hidden_states = self.dense(hidden_states)
405
+ hidden_states = self.intermediate_act_fn(hidden_states)
406
+ return hidden_states
407
+
408
+
409
+ # Copied from transformers.models.bert.modeling_bert.BertOutput
410
+ class LiltOutput(nn.Module):
411
+ def __init__(self, config):
412
+ super().__init__()
413
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
414
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
415
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
416
+
417
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
418
+ hidden_states = self.dense(hidden_states)
419
+ hidden_states = self.dropout(hidden_states)
420
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
421
+ return hidden_states
422
+
423
+
424
+ class LiltLayer(nn.Module):
425
+ def __init__(self, config):
426
+ super().__init__()
427
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
428
+ self.seq_len_dim = 1
429
+ self.attention = LiltAttention(config)
430
+ self.intermediate = LiltIntermediate(config)
431
+ self.output = LiltOutput(config)
432
+
433
+ ori_hidden_size = config.hidden_size
434
+ ori_intermediate_size = config.intermediate_size
435
+ config.hidden_size = config.hidden_size // config.channel_shrink_ratio
436
+ config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio
437
+ self.layout_intermediate = LiltIntermediate(config)
438
+ self.layout_output = LiltOutput(config)
439
+ config.hidden_size = ori_hidden_size
440
+ config.intermediate_size = ori_intermediate_size
441
+
442
+ def forward(
443
+ self,
444
+ hidden_states: torch.Tensor,
445
+ layout_inputs: torch.Tensor,
446
+ attention_mask: Optional[torch.FloatTensor] = None,
447
+ head_mask: Optional[torch.FloatTensor] = None,
448
+ output_attentions: Optional[bool] = False,
449
+ ) -> Tuple[torch.Tensor]:
450
+ self_attention_outputs = self.attention(
451
+ hidden_states,
452
+ layout_inputs,
453
+ attention_mask,
454
+ head_mask,
455
+ output_attentions=output_attentions,
456
+ )
457
+ attention_output = self_attention_outputs[0][0]
458
+ layout_attention_output = self_attention_outputs[0][1]
459
+
460
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
461
+
462
+ layer_output = apply_chunking_to_forward(
463
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
464
+ )
465
+ layout_layer_output = apply_chunking_to_forward(
466
+ self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output
467
+ )
468
+ outputs = ((layer_output, layout_layer_output),) + outputs
469
+
470
+ return outputs
471
+
472
+ # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
473
+ def feed_forward_chunk(self, attention_output):
474
+ intermediate_output = self.intermediate(attention_output)
475
+ layer_output = self.output(intermediate_output, attention_output)
476
+ return layer_output
477
+
478
+ def layout_feed_forward_chunk(self, attention_output):
479
+ intermediate_output = self.layout_intermediate(attention_output)
480
+ layer_output = self.layout_output(intermediate_output, attention_output)
481
+ return layer_output
482
+
483
+
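For orientation on the `channel_shrink_ratio` juggling in `LiltLayer.__init__` above: the layout branch is deliberately narrower than the text branch, which is why the config is temporarily shrunk while `layout_intermediate` / `layout_output` are built and then restored. A rough sketch of the resulting widths, assuming illustrative base-size values (the concrete numbers depend on the checkpoint's `LiltConfig`):

```python
# Illustrative only: widths of the layout branch relative to the text branch,
# assuming hidden_size=768, intermediate_size=3072 and channel_shrink_ratio=4.
hidden_size, intermediate_size, channel_shrink_ratio = 768, 3072, 4

layout_hidden_size = hidden_size // channel_shrink_ratio              # 192
layout_intermediate_size = intermediate_size // channel_shrink_ratio  # 768

# LiltLayer builds its layout_intermediate / layout_output modules with the shrunk
# sizes, then restores the original values so the text branch keeps the full width.
```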
484
+ class LiltEncoder(nn.Module):
485
+ # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Lilt
486
+ def __init__(self, config):
487
+ super().__init__()
488
+ self.config = config
489
+ self.layer = nn.ModuleList([LiltLayer(config) for _ in range(config.num_hidden_layers)])
490
+ self.gradient_checkpointing = False
491
+
492
+ def forward(
493
+ self,
494
+ hidden_states: torch.Tensor,
495
+ layout_inputs: torch.Tensor,
496
+ attention_mask: Optional[torch.FloatTensor] = None,
497
+ head_mask: Optional[torch.FloatTensor] = None,
498
+ output_attentions: Optional[bool] = False,
499
+ output_hidden_states: Optional[bool] = False,
500
+ return_dict: Optional[bool] = True,
501
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
502
+ all_hidden_states = () if output_hidden_states else None
503
+ all_self_attentions = () if output_attentions else None
504
+
505
+ for i, layer_module in enumerate(self.layer):
506
+ if output_hidden_states:
507
+ all_hidden_states = all_hidden_states + (hidden_states,)
508
+
509
+ layer_head_mask = head_mask[i] if head_mask is not None else None
510
+
511
+ if self.gradient_checkpointing and self.training:
512
+ layer_outputs = self._gradient_checkpointing_func(
513
+ layer_module.__call__,
514
+ hidden_states,
515
+ layout_inputs,
516
+ attention_mask,
517
+ layer_head_mask,
518
+ output_attentions,
519
+ )
520
+ else:
521
+ layer_outputs = layer_module(
522
+ hidden_states,
523
+ layout_inputs,
524
+ attention_mask,
525
+ layer_head_mask,
526
+ output_attentions,
527
+ )
528
+
529
+ hidden_states = layer_outputs[0][0]
530
+ layout_inputs = layer_outputs[0][1]
531
+
532
+ if output_attentions:
533
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
534
+
535
+ if output_hidden_states:
536
+ all_hidden_states = all_hidden_states + (hidden_states,)
537
+
538
+ if not return_dict:
539
+ return tuple(
540
+ v
541
+ for v in [
542
+ hidden_states,
543
+ all_hidden_states,
544
+ all_self_attentions,
545
+ ]
546
+ if v is not None
547
+ )
548
+ return BaseModelOutput(
549
+ last_hidden_state=hidden_states,
550
+ hidden_states=all_hidden_states,
551
+ attentions=all_self_attentions,
552
+ )
553
+
554
+
555
+ # Copied from transformers.models.bert.modeling_bert.BertPooler
556
+ class LiltPooler(nn.Module):
557
+ def __init__(self, config):
558
+ super().__init__()
559
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
560
+ self.activation = nn.Tanh()
561
+
562
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
563
+ # We "pool" the model by simply taking the hidden state corresponding
564
+ # to the first token.
565
+ first_token_tensor = hidden_states[:, 0]
566
+ pooled_output = self.dense(first_token_tensor)
567
+ pooled_output = self.activation(pooled_output)
568
+ return pooled_output
569
+
570
+
571
+ class LiltPreTrainedModel(PreTrainedModel):
572
+ """
573
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
574
+ models.
575
+ """
576
+
577
+ config_class = LiltConfig
578
+ base_model_prefix = "lilt"
579
+ supports_gradient_checkpointing = True
580
+ _no_split_modules = []
581
+
582
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
583
+ def _init_weights(self, module):
584
+ """Initialize the weights"""
585
+ if isinstance(module, nn.Linear):
586
+ # Slightly different from the TF version which uses truncated_normal for initialization
587
+ # cf https://github.com/pytorch/pytorch/pull/5617
588
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
589
+ if module.bias is not None:
590
+ module.bias.data.zero_()
591
+ elif isinstance(module, nn.Embedding):
592
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
593
+ if module.padding_idx is not None:
594
+ module.weight.data[module.padding_idx].zero_()
595
+ elif isinstance(module, nn.LayerNorm):
596
+ module.bias.data.zero_()
597
+ module.weight.data.fill_(1.0)
598
+
599
+
600
+ LILT_START_DOCSTRING = r"""
601
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
602
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
603
+ etc.)
604
+
605
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
606
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
607
+ and behavior.
608
+
609
+ Parameters:
610
+ config ([`LiltConfig`]): Model configuration class with all the parameters of the
611
+ model. Initializing with a config file does not load the weights associated with the model, only the
612
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
613
+ """
614
+
615
+ LILT_INPUTS_DOCSTRING = r"""
616
+ Args:
617
+ input_ids (`torch.LongTensor` of shape `({0})`):
618
+ Indices of input sequence tokens in the vocabulary.
619
+
620
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
621
+ [`PreTrainedTokenizer.__call__`] for details.
622
+
623
+ [What are input IDs?](../glossary#input-ids)
624
+
625
+ bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
626
+ Bounding boxes of each input sequence tokens. Selected in the range `[0,
627
+ config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
628
+ format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
629
+ y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
630
+
631
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
632
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
633
+
634
+ - 1 for tokens that are **not masked**,
635
+ - 0 for tokens that are **masked**.
636
+
637
+ [What are attention masks?](../glossary#attention-mask)
638
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
639
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
640
+ 1]`:
641
+
642
+ - 0 corresponds to a *sentence A* token,
643
+ - 1 corresponds to a *sentence B* token.
644
+
645
+ [What are token type IDs?](../glossary#token-type-ids)
646
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
647
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
648
+ config.max_position_embeddings - 1]`.
649
+
650
+ [What are position IDs?](../glossary#position-ids)
651
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
652
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
653
+
654
+ - 1 indicates the head is **not masked**,
655
+ - 0 indicates the head is **masked**.
656
+
657
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
658
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
659
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
660
+ model's internal embedding lookup matrix.
661
+ output_attentions (`bool`, *optional*):
662
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
663
+ tensors for more detail.
664
+ output_hidden_states (`bool`, *optional*):
665
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
666
+ more detail.
667
+ return_dict (`bool`, *optional*):
668
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
669
+ """
670
+
671
+
672
+ @add_start_docstrings(
673
+ "The bare LiLT Model transformer outputting raw hidden-states without any specific head on top.",
674
+ LILT_START_DOCSTRING,
675
+ )
676
+ class LiltModel(LiltPreTrainedModel):
677
+ def __init__(self, config, add_pooling_layer=True):
678
+ super().__init__(config)
679
+ self.config = config
680
+
681
+ self.embeddings = LiltTextEmbeddings(config)
682
+ self.layout_embeddings = LiltLayoutEmbeddings(config)
683
+ self.encoder = LiltEncoder(config)
684
+
685
+ self.pooler = LiltPooler(config) if add_pooling_layer else None
686
+
687
+ # Initialize weights and apply final processing
688
+ self.post_init()
689
+
690
+ def get_input_embeddings(self):
691
+ return self.embeddings.word_embeddings
692
+
693
+ def set_input_embeddings(self, value):
694
+ self.embeddings.word_embeddings = value
695
+
696
+ def _prune_heads(self, heads_to_prune):
697
+ """
698
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
699
+ class PreTrainedModel
700
+ """
701
+ for layer, heads in heads_to_prune.items():
702
+ self.encoder.layer[layer].attention.prune_heads(heads)
703
+
704
+ @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
705
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
706
+ def forward(
707
+ self,
708
+ input_ids: Optional[torch.Tensor] = None,
709
+ bbox: Optional[torch.Tensor] = None,
710
+ attention_mask: Optional[torch.Tensor] = None,
711
+ token_type_ids: Optional[torch.Tensor] = None,
712
+ position_ids: Optional[torch.Tensor] = None,
713
+ head_mask: Optional[torch.Tensor] = None,
714
+ inputs_embeds: Optional[torch.Tensor] = None,
715
+ output_attentions: Optional[bool] = None,
716
+ output_hidden_states: Optional[bool] = None,
717
+ return_dict: Optional[bool] = None,
718
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
719
+ r"""
720
+
721
+ Returns:
722
+
723
+ Examples:
724
+
725
+ ```python
726
+ >>> from transformers import AutoTokenizer, AutoModel
727
+ >>> from datasets import load_dataset
728
+
729
+ >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
730
+ >>> model = AutoModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
731
+
732
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
733
+ >>> example = dataset[0]
734
+ >>> words = example["tokens"]
735
+ >>> boxes = example["bboxes"]
736
+
737
+ >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
738
+
739
+ >>> outputs = model(**encoding)
740
+ >>> last_hidden_states = outputs.last_hidden_state
741
+ ```"""
742
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
743
+ output_hidden_states = (
744
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
745
+ )
746
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
747
+
748
+ if input_ids is not None and inputs_embeds is not None:
749
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
750
+ elif input_ids is not None:
751
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
752
+ input_shape = input_ids.size()
753
+ elif inputs_embeds is not None:
754
+ input_shape = inputs_embeds.size()[:-1]
755
+ else:
756
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
757
+
758
+ batch_size, seq_length = input_shape
759
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
760
+
761
+ if bbox is None:
762
+ bbox = torch.zeros(input_shape + (4,), dtype=torch.long, device=device)
763
+
764
+ if attention_mask is None:
765
+ attention_mask = torch.ones(((batch_size, seq_length)), device=device)
766
+
767
+ if token_type_ids is None:
768
+ if hasattr(self.embeddings, "token_type_ids"):
769
+ buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
770
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
771
+ token_type_ids = buffered_token_type_ids_expanded
772
+ else:
773
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
774
+
775
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
776
+ # ourselves in which case we just need to make it broadcastable to all heads.
777
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
778
+
779
+ # Prepare head mask if needed
780
+ # 1.0 in head_mask indicate we keep the head
781
+ # attention_probs has shape bsz x n_heads x N x N
782
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
783
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
784
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
785
+
786
+ embedding_output, position_ids = self.embeddings(
787
+ input_ids=input_ids,
788
+ position_ids=position_ids,
789
+ token_type_ids=token_type_ids,
790
+ inputs_embeds=inputs_embeds,
791
+ )
792
+
793
+ layout_embedding_output = self.layout_embeddings(bbox=bbox, position_ids=position_ids)
794
+
795
+ encoder_outputs = self.encoder(
796
+ embedding_output,
797
+ layout_embedding_output,
798
+ attention_mask=extended_attention_mask,
799
+ head_mask=head_mask,
800
+ output_attentions=output_attentions,
801
+ output_hidden_states=output_hidden_states,
802
+ return_dict=return_dict,
803
+ )
804
+ sequence_output = encoder_outputs[0]
805
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
806
+
807
+ if not return_dict:
808
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
809
+
810
+ return BaseModelOutputWithPooling(
811
+ last_hidden_state=sequence_output,
812
+ pooler_output=pooled_output,
813
+ hidden_states=encoder_outputs.hidden_states,
814
+ attentions=encoder_outputs.attentions,
815
+ )
816
+
817
+
818
+ @add_start_docstrings(
819
+ """
820
+ LiLT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
821
+ output) e.g. for GLUE tasks.
822
+ """,
823
+ LILT_START_DOCSTRING,
824
+ )
825
+ class LiltForSequenceClassification(LiltPreTrainedModel):
826
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification.__init__ with Roberta->Lilt, roberta->lilt
827
+ def __init__(self, config):
828
+ super().__init__(config)
829
+ self.num_labels = config.num_labels
830
+ self.config = config
831
+
832
+ self.lilt = LiltModel(config, add_pooling_layer=False)
833
+ self.classifier = LiltClassificationHead(config)
834
+
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
839
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
840
+ def forward(
841
+ self,
842
+ input_ids: Optional[torch.LongTensor] = None,
843
+ bbox: Optional[torch.Tensor] = None,
844
+ attention_mask: Optional[torch.FloatTensor] = None,
845
+ token_type_ids: Optional[torch.LongTensor] = None,
846
+ position_ids: Optional[torch.LongTensor] = None,
847
+ head_mask: Optional[torch.FloatTensor] = None,
848
+ inputs_embeds: Optional[torch.FloatTensor] = None,
849
+ labels: Optional[torch.LongTensor] = None,
850
+ output_attentions: Optional[bool] = None,
851
+ output_hidden_states: Optional[bool] = None,
852
+ return_dict: Optional[bool] = None,
853
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
854
+ r"""
855
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
856
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
857
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
858
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
859
+
860
+ Returns:
861
+
862
+ Examples:
863
+
864
+ ```python
865
+ >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
866
+ >>> from datasets import load_dataset
867
+
868
+ >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
869
+ >>> model = AutoModelForSequenceClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
870
+
871
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
872
+ >>> example = dataset[0]
873
+ >>> words = example["tokens"]
874
+ >>> boxes = example["bboxes"]
875
+
876
+ >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
877
+
878
+ >>> outputs = model(**encoding)
879
+ >>> predicted_class_idx = outputs.logits.argmax(-1).item()
880
+ >>> predicted_class = model.config.id2label[predicted_class_idx]
881
+ ```"""
882
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
883
+
884
+ outputs = self.lilt(
885
+ input_ids,
886
+ bbox=bbox,
887
+ attention_mask=attention_mask,
888
+ token_type_ids=token_type_ids,
889
+ position_ids=position_ids,
890
+ head_mask=head_mask,
891
+ inputs_embeds=inputs_embeds,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ )
896
+ sequence_output = outputs[0]
897
+ logits = self.classifier(sequence_output)
898
+
899
+ loss = None
900
+ if labels is not None:
901
+ # move labels to correct device to enable model parallelism
902
+ labels = labels.to(logits.device)
903
+ if self.config.problem_type is None:
904
+ if self.num_labels == 1:
905
+ self.config.problem_type = "regression"
906
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
907
+ self.config.problem_type = "single_label_classification"
908
+ else:
909
+ self.config.problem_type = "multi_label_classification"
910
+
911
+ if self.config.problem_type == "regression":
912
+ loss_fct = MSELoss()
913
+ if self.num_labels == 1:
914
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
915
+ else:
916
+ loss = loss_fct(logits, labels)
917
+ elif self.config.problem_type == "single_label_classification":
918
+ loss_fct = CrossEntropyLoss()
919
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
920
+ elif self.config.problem_type == "multi_label_classification":
921
+ loss_fct = BCEWithLogitsLoss()
922
+ loss = loss_fct(logits, labels)
923
+
924
+ if not return_dict:
925
+ output = (logits,) + outputs[2:]
926
+ return ((loss,) + output) if loss is not None else output
927
+
928
+ return SequenceClassifierOutput(
929
+ loss=loss,
930
+ logits=logits,
931
+ hidden_states=outputs.hidden_states,
932
+ attentions=outputs.attentions,
933
+ )
934
+
935
+
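The loss selection in `LiltForSequenceClassification.forward` above keys off `config.problem_type`, and falls back to the label dtype and `num_labels` when it is unset. A small sketch of label tensors that would land in each branch (values are made up purely for illustration):

```python
import torch

# num_labels == 1                  -> "regression"                  -> MSELoss
regression_labels = torch.tensor([[0.7], [1.2]])

# num_labels > 1, integer labels   -> "single_label_classification" -> CrossEntropyLoss
single_labels = torch.tensor([2, 0])

# float multi-hot labels           -> "multi_label_classification"  -> BCEWithLogitsLoss
multi_labels = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
```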
936
+ @add_start_docstrings(
937
+ """
938
+ Lilt Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
939
+ Named-Entity-Recognition (NER) tasks.
940
+ """,
941
+ LILT_START_DOCSTRING,
942
+ )
943
+ class LiltForTokenClassification(LiltPreTrainedModel):
944
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.__init__ with Roberta->Lilt, roberta->lilt
945
+ def __init__(self, config):
946
+ super().__init__(config)
947
+ self.num_labels = config.num_labels
948
+
949
+ self.lilt = LiltModel(config, add_pooling_layer=False)
950
+ classifier_dropout = (
951
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
952
+ )
953
+ self.dropout = nn.Dropout(classifier_dropout)
954
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
955
+
956
+ # Initialize weights and apply final processing
957
+ self.post_init()
958
+
959
+ @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
960
+ @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
961
+ def forward(
962
+ self,
963
+ input_ids: Optional[torch.LongTensor] = None,
964
+ bbox: Optional[torch.LongTensor] = None,
965
+ attention_mask: Optional[torch.FloatTensor] = None,
966
+ token_type_ids: Optional[torch.LongTensor] = None,
967
+ position_ids: Optional[torch.LongTensor] = None,
968
+ head_mask: Optional[torch.FloatTensor] = None,
969
+ inputs_embeds: Optional[torch.FloatTensor] = None,
970
+ labels: Optional[torch.LongTensor] = None,
971
+ output_attentions: Optional[bool] = None,
972
+ output_hidden_states: Optional[bool] = None,
973
+ return_dict: Optional[bool] = None,
974
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
975
+ r"""
976
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
977
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
978
+
979
+ Returns:
980
+
981
+ Examples:
982
+
983
+ ```python
984
+ >>> from transformers import AutoTokenizer, AutoModelForTokenClassification
985
+ >>> from datasets import load_dataset
986
+
987
+ >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
988
+ >>> model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
989
+
990
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
991
+ >>> example = dataset[0]
992
+ >>> words = example["tokens"]
993
+ >>> boxes = example["bboxes"]
994
+
995
+ >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
996
+
997
+ >>> outputs = model(**encoding)
998
+ >>> predicted_class_indices = outputs.logits.argmax(-1)
999
+ ```"""
1000
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1001
+
1002
+ outputs = self.lilt(
1003
+ input_ids,
1004
+ bbox=bbox,
1005
+ attention_mask=attention_mask,
1006
+ token_type_ids=token_type_ids,
1007
+ position_ids=position_ids,
1008
+ head_mask=head_mask,
1009
+ inputs_embeds=inputs_embeds,
1010
+ output_attentions=output_attentions,
1011
+ output_hidden_states=output_hidden_states,
1012
+ return_dict=return_dict,
1013
+ )
1014
+
1015
+ sequence_output = outputs[0]
1016
+
1017
+ sequence_output = self.dropout(sequence_output)
1018
+ logits = self.classifier(sequence_output)
1019
+
1020
+ loss = None
1021
+ if labels is not None:
1022
+ # move labels to correct device to enable model parallelism
1023
+ labels = labels.to(logits.device)
1024
+ loss_fct = CrossEntropyLoss()
1025
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1026
+
1027
+ if not return_dict:
1028
+ output = (logits,) + outputs[2:]
1029
+ return ((loss,) + output) if loss is not None else output
1030
+
1031
+ return TokenClassifierOutput(
1032
+ loss=loss,
1033
+ logits=logits,
1034
+ hidden_states=outputs.hidden_states,
1035
+ attentions=outputs.attentions,
1036
+ )
1037
+
1038
+
1039
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Lilt
1040
+ class LiltClassificationHead(nn.Module):
1041
+ """Head for sentence-level classification tasks."""
1042
+
1043
+ def __init__(self, config):
1044
+ super().__init__()
1045
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
1046
+ classifier_dropout = (
1047
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1048
+ )
1049
+ self.dropout = nn.Dropout(classifier_dropout)
1050
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
1051
+
1052
+ def forward(self, features, **kwargs):
1053
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
1054
+ x = self.dropout(x)
1055
+ x = self.dense(x)
1056
+ x = torch.tanh(x)
1057
+ x = self.dropout(x)
1058
+ x = self.out_proj(x)
1059
+ return x
1060
+
1061
+
1062
+ @add_start_docstrings(
1063
+ """
1064
+ Lilt Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1065
+ layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1066
+ """,
1067
+ LILT_START_DOCSTRING,
1068
+ )
1069
+ class LiltForQuestionAnswering(LiltPreTrainedModel):
1070
+ # Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering.__init__ with Roberta->Lilt, roberta->lilt
1071
+ def __init__(self, config):
1072
+ super().__init__(config)
1073
+ self.num_labels = config.num_labels
1074
+
1075
+ self.lilt = LiltModel(config, add_pooling_layer=False)
1076
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1077
+
1078
+ # Initialize weights and apply final processing
1079
+ self.post_init()
1080
+
1081
+ @add_start_docstrings_to_model_forward(LILT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1082
+ @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
1083
+ def forward(
1084
+ self,
1085
+ input_ids: Optional[torch.LongTensor] = None,
1086
+ bbox: Optional[torch.LongTensor] = None,
1087
+ attention_mask: Optional[torch.FloatTensor] = None,
1088
+ token_type_ids: Optional[torch.LongTensor] = None,
1089
+ position_ids: Optional[torch.LongTensor] = None,
1090
+ head_mask: Optional[torch.FloatTensor] = None,
1091
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1092
+ start_positions: Optional[torch.LongTensor] = None,
1093
+ end_positions: Optional[torch.LongTensor] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
1098
+ r"""
1099
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1100
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1101
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1102
+ are not taken into account for computing the loss.
1103
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1104
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1105
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1106
+ are not taken into account for computing the loss.
1107
+
1108
+ Returns:
1109
+
1110
+ Examples:
1111
+
1112
+ ```python
1113
+ >>> from transformers import AutoTokenizer, AutoModelForQuestionAnswering
1114
+ >>> from datasets import load_dataset
1115
+
1116
+ >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
1117
+ >>> model = AutoModelForQuestionAnswering.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
1118
+
1119
+ >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
1120
+ >>> example = dataset[0]
1121
+ >>> words = example["tokens"]
1122
+ >>> boxes = example["bboxes"]
1123
+
1124
+ >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
1125
+
1126
+ >>> outputs = model(**encoding)
1127
+
1128
+ >>> answer_start_index = outputs.start_logits.argmax()
1129
+ >>> answer_end_index = outputs.end_logits.argmax()
1130
+
1131
+ >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1]
1132
+ >>> predicted_answer = tokenizer.decode(predict_answer_tokens)
1133
+ ```"""
1134
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1135
+
1136
+ outputs = self.lilt(
1137
+ input_ids,
1138
+ bbox=bbox,
1139
+ attention_mask=attention_mask,
1140
+ token_type_ids=token_type_ids,
1141
+ position_ids=position_ids,
1142
+ head_mask=head_mask,
1143
+ inputs_embeds=inputs_embeds,
1144
+ output_attentions=output_attentions,
1145
+ output_hidden_states=output_hidden_states,
1146
+ return_dict=return_dict,
1147
+ )
1148
+
1149
+ sequence_output = outputs[0]
1150
+
1151
+ logits = self.qa_outputs(sequence_output)
1152
+ start_logits, end_logits = logits.split(1, dim=-1)
1153
+ start_logits = start_logits.squeeze(-1).contiguous()
1154
+ end_logits = end_logits.squeeze(-1).contiguous()
1155
+
1156
+ total_loss = None
1157
+ if start_positions is not None and end_positions is not None:
1158
+ # If we are on multi-GPU, split add a dimension
1159
+ if len(start_positions.size()) > 1:
1160
+ start_positions = start_positions.squeeze(-1)
1161
+ if len(end_positions.size()) > 1:
1162
+ end_positions = end_positions.squeeze(-1)
1163
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1164
+ ignored_index = start_logits.size(1)
1165
+ start_positions = start_positions.clamp(0, ignored_index)
1166
+ end_positions = end_positions.clamp(0, ignored_index)
1167
+
1168
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1169
+ start_loss = loss_fct(start_logits, start_positions)
1170
+ end_loss = loss_fct(end_logits, end_positions)
1171
+ total_loss = (start_loss + end_loss) / 2
1172
+
1173
+ if not return_dict:
1174
+ output = (start_logits, end_logits) + outputs[2:]
1175
+ return ((total_loss,) + output) if total_loss is not None else output
1176
+
1177
+ return QuestionAnsweringModelOutput(
1178
+ loss=total_loss,
1179
+ start_logits=start_logits,
1180
+ end_logits=end_logits,
1181
+ hidden_states=outputs.hidden_states,
1182
+ attentions=outputs.attentions,
1183
+ )
1184
+
1185
+
1186
+ __all__ = [
1187
+ "LiltForQuestionAnswering",
1188
+ "LiltForSequenceClassification",
1189
+ "LiltForTokenClassification",
1190
+ "LiltModel",
1191
+ "LiltPreTrainedModel",
1192
+ ]
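Taken together, the classes above expect `bbox` coordinates already normalized to the 0-1000 range described in `LILT_INPUTS_DOCSTRING`. As a minimal usage sketch building on the checkpoint referenced in the docstring examples, and assuming pixel-space word boxes, token classification could look roughly like this (the `normalize_box` helper is a hypothetical convenience, not part of the file above, and the words/boxes are toy values):

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification


def normalize_box(box, width, height):
    # Hypothetical helper: scale pixel coordinates (x0, y0, x1, y1) into the
    # 0-1000 range that the `bbox` argument documented above expects.
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

words = ["Invoice", "Number:", "12345"]
pixel_boxes = [(10, 10, 80, 30), (90, 10, 160, 30), (170, 10, 220, 30)]
boxes = [normalize_box(b, width=800, height=600) for b in pixel_boxes]

encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)

predicted_class_indices = outputs.logits.argmax(-1)  # one label id per token
```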
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (634 Bytes).
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/configuration_lxmert.cpython-310.pyc ADDED
Binary file (7.82 kB).
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_lxmert.cpython-310.pyc ADDED
Binary file (46.6 kB).
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/modeling_tf_lxmert.cpython-310.pyc ADDED
Binary file (51.6 kB).
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert_fast.cpython-310.pyc ADDED
Binary file (6.67 kB).
janus/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:482c6409b2d5826dc5480df43e32a6f8168f2824c9338be1d3055e8736e93a3a
3
+ size 106853
janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (546 Bytes).
janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/configuration_persimmon.cpython-310.pyc ADDED
Binary file (8.02 kB).
janus/lib/python3.10/site-packages/transformers/models/persimmon/__pycache__/modeling_persimmon.cpython-310.pyc ADDED
Binary file (35.5 kB).
janus/lib/python3.10/site-packages/transformers/models/persimmon/configuration_persimmon.py ADDED
@@ -0,0 +1,176 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Adept AI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Persimmon model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...modeling_rope_utils import rope_config_validation
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class PersimmonConfig(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`PersimmonModel`]. It is used to instantiate an
28
+ Persimmon model according to the specified arguments, defining the model architecture. Instantiating a
29
+ configuration with the defaults will yield a similar configuration to that of the
30
+ [adept/persimmon-8b-base](https://huggingface.co/adept/persimmon-8b-base).
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+
36
+ Args:
37
+ vocab_size (`int`, *optional*, defaults to 262144):
38
+ Vocabulary size of the Persimmon model. Defines the number of different tokens that can be represented by
39
+ the `inputs_ids` passed when calling [`PersimmonModel`]
40
+ hidden_size (`int`, *optional*, defaults to 4096):
41
+ Dimension of the hidden representations.
42
+ intermediate_size (`int`, *optional*, defaults to 16384):
43
+ Dimension of the MLP representations.
44
+ num_hidden_layers (`int`, *optional*, defaults to 36):
45
+ Number of hidden layers in the Transformer encoder.
46
+ num_attention_heads (`int`, *optional*, defaults to 64):
47
+ Number of attention heads for each attention layer in the Transformer encoder.
48
+ hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
49
+ The non-linear activation function (function or string) in the decoder.
50
+ max_position_embeddings (`int`, *optional*, defaults to 16384):
51
+ The maximum sequence length that this model might ever be used with.
52
+ initializer_range (`float`, *optional*, defaults to 0.02):
53
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
54
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
55
+ The epsilon used by the layer normalization layers.
56
+ use_cache (`bool`, *optional*, defaults to `True`):
57
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
58
+ relevant if `config.is_decoder=True`.
59
+ tie_word_embeddings(`bool`, *optional*, defaults to `False`):
60
+ Whether to tie weight embeddings
61
+ rope_theta (`float`, *optional*, defaults to 25000.0):
62
+ The base period of the RoPE embeddings.
63
+ rope_scaling (`Dict`, *optional*):
64
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
65
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
66
+ accordingly.
67
+ Expected contents:
68
+ `rope_type` (`str`):
69
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
70
+ 'llama3'], with 'default' being the original RoPE implementation.
71
+ `factor` (`float`, *optional*):
72
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
73
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
74
+ original maximum pre-trained length.
75
+ `original_max_position_embeddings` (`int`, *optional*):
76
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
77
+ pretraining.
78
+ `attention_factor` (`float`, *optional*):
79
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
80
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
81
+ `factor` field to infer the suggested value.
82
+ `beta_fast` (`float`, *optional*):
83
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
84
+ ramp function. If unspecified, it defaults to 32.
85
+ `beta_slow` (`float`, *optional*):
86
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
87
+ ramp function. If unspecified, it defaults to 1.
88
+ `short_factor` (`List[float]`, *optional*):
89
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
90
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
91
+ size divided by the number of attention heads divided by 2
92
+ `long_factor` (`List[float]`, *optional*):
93
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (<
94
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
95
+ size divided by the number of attention heads divided by 2
96
+ `low_freq_factor` (`float`, *optional*):
97
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
98
+ `high_freq_factor` (`float`, *optional*):
99
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
100
+ qk_layernorm (`bool`, *optional*, defaults to `True`):
101
+ Whether or not to normalize the Queries and Keys after projecting the hidden states
102
+ hidden_dropout (`float`, *optional*, defaults to 0.0):
103
+ The dropout ratio after applying the MLP to the hidden states.
104
+ attention_dropout (`float`, *optional*, defaults to 0.0):
105
+ The dropout ratio after computing the attention scores.
106
+ partial_rotary_factor (`float`, *optional*, defaults to 0.5):
107
+ Percentage of the query and keys which will have rotary embedding.
108
+
109
+ Example:
110
+
111
+ ```python
112
+ >>> from transformers import PersimmonModel, PersimmonConfig
113
+
114
+ >>> # Initializing a Persimmon persimmon-7b style configuration
115
+ >>> configuration = PersimmonConfig()
116
+ ```"""
117
+
118
+ model_type = "persimmon"
119
+ keys_to_ignore_at_inference = ["past_key_values"]
120
+
121
+ def __init__(
122
+ self,
123
+ vocab_size=262144,
124
+ hidden_size=4096,
125
+ intermediate_size=16384,
126
+ num_hidden_layers=36,
127
+ num_attention_heads=64,
128
+ hidden_act="relu2",
129
+ max_position_embeddings=16384,
130
+ initializer_range=0.02,
131
+ layer_norm_eps=1e-5,
132
+ use_cache=True,
133
+ tie_word_embeddings=False,
134
+ rope_theta=25000.0,
135
+ rope_scaling=None,
136
+ qk_layernorm=True,
137
+ hidden_dropout=0.0,
138
+ attention_dropout=0.0,
139
+ partial_rotary_factor=0.5,
140
+ pad_token_id=None,
141
+ bos_token_id=1,
142
+ eos_token_id=2,
143
+ **kwargs,
144
+ ):
145
+ self.vocab_size = vocab_size
146
+ self.max_position_embeddings = max_position_embeddings
147
+ self.hidden_size = hidden_size
148
+ self.intermediate_size = intermediate_size
149
+ self.num_hidden_layers = num_hidden_layers
150
+ self.num_attention_heads = num_attention_heads
151
+ self.hidden_act = hidden_act
152
+ self.initializer_range = initializer_range
153
+ self.layer_norm_eps = layer_norm_eps
154
+ self.use_cache = use_cache
155
+ self.rope_theta = rope_theta
156
+ self.rope_scaling = rope_scaling
157
+ self.qk_layernorm = qk_layernorm
158
+ self.hidden_dropout = hidden_dropout
159
+ self.attention_dropout = attention_dropout
160
+ self.partial_rotary_factor = partial_rotary_factor
161
+ # Validate the correctness of rotary position embeddings parameters
162
+ # BC: if there is a 'type' field, move it to 'rope_type'.
163
+ if self.rope_scaling is not None and "type" in self.rope_scaling:
164
+ self.rope_scaling["rope_type"] = self.rope_scaling["type"]
165
+ rope_config_validation(self)
166
+
167
+ super().__init__(
168
+ pad_token_id=pad_token_id,
169
+ bos_token_id=bos_token_id,
170
+ eos_token_id=eos_token_id,
171
+ tie_word_embeddings=tie_word_embeddings,
172
+ **kwargs,
173
+ )
174
+
175
+
176
+ __all__ = ["PersimmonConfig"]
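As a quick illustration of the `rope_scaling` contract documented above, a configuration can be instantiated with a scaled RoPE; the dictionary below is only a sketch (the `"linear"` variant with an arbitrary factor), and the legacy `"type"` key shows the backward-compatibility remapping done in `__init__`:

```python
from transformers import PersimmonConfig

# Every rope_type except "default" requires a `factor`; 2.0 here is arbitrary.
config = PersimmonConfig(rope_scaling={"rope_type": "linear", "factor": 2.0})

# The legacy "type" key is accepted and copied into "rope_type" before validation.
legacy_config = PersimmonConfig(rope_scaling={"type": "linear", "factor": 2.0})

print(config.rope_scaling, legacy_config.rope_scaling)
```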
janus/lib/python3.10/site-packages/transformers/models/persimmon/modeling_persimmon.py ADDED
@@ -0,0 +1,1128 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """PyTorch Persimmon model."""
21
+
22
+ import math
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.utils.checkpoint
27
+ from torch import nn
28
+ from torch.nn import CrossEntropyLoss
29
+
30
+ from ...activations import ACT2FN
31
+ from ...cache_utils import Cache, DynamicCache, StaticCache
32
+ from ...generation import GenerationMixin
33
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
34
+ from ...modeling_outputs import (
35
+ BaseModelOutputWithPast,
36
+ CausalLMOutputWithPast,
37
+ SequenceClassifierOutputWithPast,
38
+ TokenClassifierOutput,
39
+ )
40
+ from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
41
+ from ...modeling_utils import PreTrainedModel
42
+ from ...utils import (
43
+ add_code_sample_docstrings,
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from .configuration_persimmon import PersimmonConfig
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CHECKPOINT_FOR_DOC = "adept/persimmon-8b-base"
55
+ _CONFIG_FOR_DOC = "PersimmonConfig"
56
+
57
+
58
+ # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Persimmon
59
+ class PersimmonRotaryEmbedding(nn.Module):
60
+ def __init__(self, config: PersimmonConfig, device=None):
61
+ super().__init__()
62
+ # BC: "rope_type" was originally "type"
63
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
64
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
65
+ else:
66
+ self.rope_type = "default"
67
+ self.max_seq_len_cached = config.max_position_embeddings
68
+ self.original_max_seq_len = config.max_position_embeddings
69
+
70
+ self.config = config
71
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
72
+
73
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
74
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
75
+ self.original_inv_freq = self.inv_freq
76
+
77
+ def _dynamic_frequency_update(self, position_ids, device):
78
+ """
79
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
80
+ 1 - growing beyond the cached sequence length (allow scaling)
81
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
82
+ """
83
+ seq_len = torch.max(position_ids) + 1
84
+ if seq_len > self.max_seq_len_cached: # growth
85
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
86
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
87
+ self.max_seq_len_cached = seq_len
88
+
89
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
90
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
91
+ self.max_seq_len_cached = self.original_max_seq_len
92
+
93
+ @torch.no_grad()
94
+ def forward(self, x, position_ids):
95
+ if "dynamic" in self.rope_type:
96
+ self._dynamic_frequency_update(position_ids, device=x.device)
97
+
98
+ # Core RoPE block
99
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
100
+ position_ids_expanded = position_ids[:, None, :].float()
101
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
102
+ device_type = x.device.type
103
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
104
+ with torch.autocast(device_type=device_type, enabled=False):
105
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
106
+ emb = torch.cat((freqs, freqs), dim=-1)
107
+ cos = emb.cos()
108
+ sin = emb.sin()
109
+
110
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
111
+ cos = cos * self.attention_scaling
112
+ sin = sin * self.attention_scaling
113
+
114
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
115
+
116
+
117
+ # Copied from transformers.models.llama.modeling_llama.rotate_half
118
+ def rotate_half(x):
119
+ """Rotates half the hidden dims of the input."""
120
+ x1 = x[..., : x.shape[-1] // 2]
121
+ x2 = x[..., x.shape[-1] // 2 :]
122
+ return torch.cat((-x2, x1), dim=-1)
123
+
124
+
125
+ # Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
126
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
127
+ """Applies Rotary Position Embedding to the query and key tensors.
128
+
129
+ Args:
130
+ q (`torch.Tensor`): The query tensor.
131
+ k (`torch.Tensor`): The key tensor.
132
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
133
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
134
+ position_ids (`torch.Tensor`, *optional*):
135
+ Deprecated and unused.
136
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
137
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
138
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
139
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
140
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
141
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
142
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
143
+ Returns:
144
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
145
+ """
146
+ cos = cos.unsqueeze(unsqueeze_dim)
147
+ sin = sin.unsqueeze(unsqueeze_dim)
148
+ q_embed = (q * cos) + (rotate_half(q) * sin)
149
+ k_embed = (k * cos) + (rotate_half(k) * sin)
150
+ return q_embed, k_embed
151
+
152
+
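To make the rotation above concrete, here is a small self-contained sketch with toy shapes. It builds `cos`/`sin` from the standard RoPE frequency formula (using the config's default `rope_theta` of 25000.0) and rotates only the first `rotary_ndims` channels, mirroring the partial-rotation split that `PersimmonAttention` performs further down; all sizes are illustrative:

```python
import torch
from transformers.models.persimmon.modeling_persimmon import apply_rotary_pos_emb

batch, heads, seq_len, head_dim = 1, 2, 4, 8
rotary_ndims = head_dim // 2  # mirrors partial_rotary_factor = 0.5

q = torch.randn(batch, heads, seq_len, head_dim)
k = torch.randn(batch, heads, seq_len, head_dim)

# Standard RoPE frequencies over the rotary slice only (theta = 25000.0)
inv_freq = 1.0 / (25000.0 ** (torch.arange(0, rotary_ndims, 2).float() / rotary_ndims))
freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)  # [seq_len, rotary_ndims // 2]
emb = torch.cat((freqs, freqs), dim=-1)                       # [seq_len, rotary_ndims]
cos, sin = emb.cos()[None, :, :], emb.sin()[None, :, :]       # [1, seq_len, rotary_ndims]

# Rotate the first rotary_ndims channels, pass the rest through unchanged
q_rot, q_pass = q[..., :rotary_ndims], q[..., rotary_ndims:]
k_rot, k_pass = k[..., :rotary_ndims], k[..., rotary_ndims:]
q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)   # cos/sin broadcast over heads

q_out = torch.cat((q_rot, q_pass), dim=-1)
k_out = torch.cat((k_rot, k_pass), dim=-1)
print(q_out.shape, k_out.shape)  # torch.Size([1, 2, 4, 8]) twice
```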
153
+ # Copied from transformers.models.gpt_neox.modeling_gpt_neox.GPTNeoXMLP with GPTNeoX->Persimmon
154
+ class PersimmonMLP(nn.Module):
155
+ def __init__(self, config):
156
+ super().__init__()
157
+ self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
158
+ self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
159
+ self.act = ACT2FN[config.hidden_act]
160
+
161
+ def forward(self, hidden_states):
162
+ hidden_states = self.dense_h_to_4h(hidden_states)
163
+ hidden_states = self.act(hidden_states)
164
+ hidden_states = self.dense_4h_to_h(hidden_states)
165
+ return hidden_states
166
+
167
+
168
+ class PersimmonAttention(nn.Module):
169
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
170
+
171
+ def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
172
+ super().__init__()
173
+ self.config = config
174
+ self.layer_idx = layer_idx
175
+ if layer_idx is None:
176
+ logger.warning_once(
177
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
178
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
179
+ "when creating this class."
180
+ )
181
+
182
+ self.hidden_size = config.hidden_size
183
+ self.num_heads = config.num_attention_heads
184
+ self.head_dim = self.hidden_size // self.num_heads
185
+ self.rope_theta = config.rope_theta
186
+ self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
187
+ self.is_causal = True
188
+
189
+ if (self.head_dim * self.num_heads) != self.hidden_size:
190
+ raise ValueError(
191
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
192
+ f" and `num_heads`: {self.num_heads})."
193
+ )
194
+ self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
195
+ self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
196
+ self.qk_layernorm = config.qk_layernorm
197
+
198
+ if self.qk_layernorm:
199
+ self.q_layernorm = nn.LayerNorm(
200
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
201
+ )
202
+ self.k_layernorm = nn.LayerNorm(
203
+ config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
204
+ )
205
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
206
+ self.rotary_emb = PersimmonRotaryEmbedding(config=self.config)
207
+
208
+ def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
209
+ """
210
+ Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
211
+ storage as `fused_qkv`
212
+
213
+ Args:
214
+ fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
215
+
216
+ Returns:
217
+ query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
218
+ value: [batch_size, seq_length, num_heads, head_dim]
219
+ """
220
+ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
221
+ fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
222
+ return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
223
+
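A shape-only sketch of the `_split_heads` view described above (toy sizes, no model weights involved): the fused projection output is viewed as `(batch, seq, num_heads, 3, head_dim)` and sliced into query/key/value tensors that share storage with the original:

```python
import torch

batch, seq_len, num_heads, head_dim = 2, 5, 4, 16
hidden_size = num_heads * head_dim

fused_qkv = torch.randn(batch, seq_len, 3 * hidden_size)            # stand-in for query_key_value output
fused_qkv = fused_qkv.view(batch, seq_len, num_heads, 3, head_dim)  # reshaped view, no copy
query, key, value = fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

print(query.shape, key.shape, value.shape)  # each torch.Size([2, 5, 4, 16])
```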
224
+ def forward(
225
+ self,
226
+ hidden_states: torch.Tensor,
227
+ attention_mask: Optional[torch.Tensor] = None,
228
+ position_ids: Optional[torch.LongTensor] = None,
229
+ past_key_value: Optional[Cache] = None,
230
+ output_attentions: bool = False,
231
+ use_cache: bool = False,
232
+ cache_position: Optional[torch.LongTensor] = None,
233
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
234
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
235
+ bsz, q_len, _ = hidden_states.size()
236
+
237
+ # [batch_size, seq_length, 3 x hidden_size]
238
+ fused_qkv = self.query_key_value(hidden_states)
239
+
240
+ # 3 x [batch_size, seq_length, num_heads, head_dim]
241
+ (query_states, key_states, value_states) = self._split_heads(fused_qkv)
242
+
243
+ if self.qk_layernorm:
244
+ query_states = self.q_layernorm(query_states)
245
+ key_states = self.k_layernorm(key_states)
246
+
247
+ # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
248
+ query_states = query_states.transpose(1, 2)
249
+ value_states = value_states.transpose(1, 2)
250
+ key_states = key_states.transpose(1, 2)
251
+
252
+ cos, sin = position_embeddings
253
+
254
+ # Partial rotary embedding
255
+ query_rot, query_pass = (
256
+ query_states[..., : self.rotary_ndims],
257
+ query_states[..., self.rotary_ndims :],
258
+ )
259
+ key_rot, key_pass = (
260
+ key_states[..., : self.rotary_ndims],
261
+ key_states[..., self.rotary_ndims :],
262
+ )
263
+ # [batch_size, num_heads, seq_length, rotary_ndims]
264
+ query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)
265
+
266
+ # [batch_size, num_heads, seq_length, head_dim]
267
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
268
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
269
+
270
+ if past_key_value is not None:
271
+ # Specific to RoPE models with partial rotation
272
+ cache_kwargs = {
273
+ "sin": sin,
274
+ "cos": cos,
275
+ "partial_rotation_size": self.rotary_ndims,
276
+ "cache_position": cache_position,
277
+ }
278
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
279
+
280
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
281
+
282
+ if attention_mask is not None: # no matter the length, we just slice it
283
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
284
+ attn_weights = attn_weights + causal_mask
285
+
286
+ # upcast attention to fp32
287
+ attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype)
288
+ attn_weights = self.attention_dropout(attn_weights)
289
+
290
+ attn_output = torch.matmul(attn_weights, value_states)
291
+
292
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
293
+ raise ValueError(
294
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
295
+ f" {attn_output.size()}"
296
+ )
297
+
298
+ attn_output = attn_output.transpose(1, 2).contiguous()
299
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
300
+
301
+ attn_output = self.dense(attn_output)
302
+
303
+ if not output_attentions:
304
+ attn_weights = None
305
+
306
+ return attn_output, attn_weights, past_key_value
307
+
308
+
309
+ class PersimmonDecoderLayer(nn.Module):
310
+ def __init__(self, config: PersimmonConfig, layer_idx: int):
311
+ super().__init__()
312
+ self.hidden_size = config.hidden_size
313
+ self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
314
+ self.mlp = PersimmonMLP(config)
315
+ self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
316
+ self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
317
+ self.dropout = nn.Dropout(config.hidden_dropout)
318
+
319
+ def forward(
320
+ self,
321
+ hidden_states: torch.Tensor,
322
+ attention_mask: Optional[torch.Tensor] = None,
323
+ position_ids: Optional[torch.LongTensor] = None,
324
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
325
+ output_attentions: Optional[bool] = False,
326
+ use_cache: Optional[bool] = False,
327
+ cache_position: Optional[torch.LongTensor] = None,
328
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
329
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
330
+ """
331
+ Args:
332
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
333
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
334
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
335
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
336
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
337
+ `[0, config.n_positions - 1]`.
338
+ [What are position IDs?](../glossary#position-ids)
339
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
340
+ cached past key and value projection states
341
+ output_attentions (`bool`, *optional*):
342
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
343
+ returned tensors for more detail.
344
+ use_cache (`bool`, *optional*):
345
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
346
+ (see `past_key_values`).
347
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
348
+ Indices depicting the position of the input sequence tokens in the sequence
349
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
350
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
351
+ with `head_dim` being the embedding dimension of each attention head.
352
+ """
353
+
354
+ residual = hidden_states
355
+
356
+ hidden_states = self.input_layernorm(hidden_states)
357
+
358
+ # Self Attention
359
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
360
+ hidden_states=hidden_states,
361
+ attention_mask=attention_mask,
362
+ position_ids=position_ids,
363
+ past_key_value=past_key_value,
364
+ output_attentions=output_attentions,
365
+ use_cache=use_cache,
366
+ cache_position=cache_position,
367
+ position_embeddings=position_embeddings,
368
+ )
369
+ hidden_states = residual + hidden_states
370
+
371
+ # Fully Connected
372
+ residual = hidden_states
373
+ hidden_states = self.post_attention_layernorm(hidden_states)
374
+ hidden_states = self.mlp(hidden_states)
375
+
376
+ hidden_states = self.dropout(hidden_states)
377
+ hidden_states = hidden_states + residual
378
+
379
+ outputs = (hidden_states,)
380
+
381
+ if output_attentions:
382
+ outputs += (self_attn_weights,)
383
+
384
+ if use_cache:
385
+ outputs += (present_key_value,)
386
+
387
+ return outputs
388
+
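The decoder layer follows a pre-norm residual pattern: LayerNorm, attention, residual add, then LayerNorm, MLP, dropout, and a second residual add. The toy block below shows the same wiring with stand-in sub-modules; `ToyPreNormBlock` and the `nn.Linear` mixer are illustrative assumptions, not part of the library.

```python
# Minimal sketch of the pre-norm residual pattern used by the decoder layer
# (generic sub-modules stand in for PersimmonAttention / PersimmonMLP).
import torch
from torch import nn

class ToyPreNormBlock(nn.Module):
    def __init__(self, hidden_size=16, dropout=0.1):
        super().__init__()
        self.input_layernorm = nn.LayerNorm(hidden_size)
        self.post_attention_layernorm = nn.LayerNorm(hidden_size)
        self.mixer = nn.Linear(hidden_size, hidden_size)  # stand-in for self-attention
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, 4 * hidden_size), nn.ReLU(), nn.Linear(4 * hidden_size, hidden_size)
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, hidden_states):
        residual = hidden_states
        hidden_states = self.mixer(self.input_layernorm(hidden_states))
        hidden_states = residual + hidden_states                  # first residual connection

        residual = hidden_states
        hidden_states = self.mlp(self.post_attention_layernorm(hidden_states))
        hidden_states = self.dropout(hidden_states) + residual    # second residual connection
        return hidden_states

x = torch.randn(2, 5, 16)
print(ToyPreNormBlock()(x).shape)  # torch.Size([2, 5, 16])
```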
389
+
390
+ PERSIMMON_START_DOCSTRING = r"""
391
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
392
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
393
+ etc.)
394
+
395
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
396
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
397
+ and behavior.
398
+
399
+ Parameters:
400
+ config ([`PersimmonConfig`]):
401
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
402
+ load the weights associated with the model, only the configuration. Check out the
403
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
404
+ """
405
+
406
+
407
+ @add_start_docstrings(
408
+ "The bare Persimmon Model outputting raw hidden-states without any specific head on top.",
409
+ PERSIMMON_START_DOCSTRING,
410
+ )
411
+ class PersimmonPreTrainedModel(PreTrainedModel):
412
+ config_class = PersimmonConfig
413
+ base_model_prefix = "model"
414
+ supports_gradient_checkpointing = True
415
+ _no_split_modules = ["PersimmonDecoderLayer"]
416
+ _skip_keys_device_placement = "past_key_values"
417
+ _supports_cache_class = True
418
+ _supports_quantized_cache = True
419
+ _supports_static_cache = True
420
+
421
+ def _init_weights(self, module):
422
+ std = self.config.initializer_range
423
+ if isinstance(module, nn.Linear):
424
+ module.weight.data.normal_(mean=0.0, std=std)
425
+ if module.bias is not None:
426
+ module.bias.data.zero_()
427
+ elif isinstance(module, nn.Embedding):
428
+ module.weight.data.normal_(mean=0.0, std=std)
429
+ if module.padding_idx is not None:
430
+ module.weight.data[module.padding_idx].zero_()
431
+
432
+
433
+ PERSIMMON_INPUTS_DOCSTRING = r"""
434
+ Args:
435
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
436
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
437
+ it.
438
+
439
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
440
+ [`PreTrainedTokenizer.__call__`] for details.
441
+
442
+ [What are input IDs?](../glossary#input-ids)
443
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
444
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
445
+
446
+ - 1 for tokens that are **not masked**,
447
+ - 0 for tokens that are **masked**.
448
+
449
+ [What are attention masks?](../glossary#attention-mask)
450
+
451
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
452
+ [`PreTrainedTokenizer.__call__`] for details.
453
+
454
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
455
+ `past_key_values`).
456
+
457
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
458
+ and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
459
+ information on the default strategy.
460
+
461
+ - 1 indicates the token is **not masked**,
462
+ - 0 indicates the token is **masked**.
463
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
464
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
465
+ config.n_positions - 1]`.
466
+
467
+ [What are position IDs?](../glossary#position-ids)
468
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
469
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
470
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
471
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
472
+
473
+ Two formats are allowed:
474
+ - a [`~cache_utils.Cache`] instance, see our
475
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
476
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
477
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
478
+ cache format.
479
+
480
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
481
+ legacy cache format will be returned.
482
+
483
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
484
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
485
+ of shape `(batch_size, sequence_length)`.
486
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
487
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
488
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
489
+ model's internal embedding lookup matrix.
490
+ use_cache (`bool`, *optional*):
491
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
492
+ `past_key_values`).
493
+ output_attentions (`bool`, *optional*):
494
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
495
+ tensors for more detail.
496
+ output_hidden_states (`bool`, *optional*):
497
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
498
+ more detail.
499
+ return_dict (`bool`, *optional*):
500
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
501
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
502
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
503
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
504
+ the complete sequence length.
505
+ """
506
+
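As the docstring above notes, `past_key_values` may be either a `Cache` instance or the legacy tuple-of-tuples format. A short sketch of converting between the two with `DynamicCache` follows; the tensor shapes are arbitrary illustrative values, not tied to any real checkpoint.

```python
# Converting between the legacy tuple cache and the `Cache` API.
import torch
from transformers import DynamicCache

num_layers, batch, heads, seq, head_dim = 2, 1, 4, 3, 8
legacy_cache = tuple(
    (torch.zeros(batch, heads, seq, head_dim), torch.zeros(batch, heads, seq, head_dim))
    for _ in range(num_layers)
)

cache = DynamicCache.from_legacy_cache(legacy_cache)   # legacy tuples -> Cache object
print(cache.get_seq_length())                          # 3

legacy_again = cache.to_legacy_cache()                 # Cache object -> legacy tuples
print(len(legacy_again), legacy_again[0][0].shape)     # 2 torch.Size([1, 4, 3, 8])
```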
507
+
508
+ @add_start_docstrings(
509
+ "The bare Persimmon Model outputting raw hidden-states without any specific head on top.",
510
+ PERSIMMON_START_DOCSTRING,
511
+ )
512
+ class PersimmonModel(PersimmonPreTrainedModel):
513
+ """
514
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]
515
+
516
+ Args:
517
+ config: PersimmonConfig
518
+ """
519
+
520
+ def __init__(self, config: PersimmonConfig):
521
+ super().__init__(config)
522
+ self.padding_idx = config.pad_token_id
523
+ self.vocab_size = config.vocab_size
524
+
525
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
526
+ self.layers = nn.ModuleList(
527
+ [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
528
+ )
529
+ self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
530
+
531
+ self.rotary_emb = PersimmonRotaryEmbedding(config=config)
532
+
533
+ self.gradient_checkpointing = False
534
+ # Initialize weights and apply final processing
535
+ self.post_init()
536
+
537
+ def get_input_embeddings(self):
538
+ return self.embed_tokens
539
+
540
+ def set_input_embeddings(self, value):
541
+ self.embed_tokens = value
542
+
543
+ @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
544
+ def forward(
545
+ self,
546
+ input_ids: torch.LongTensor = None,
547
+ attention_mask: Optional[torch.Tensor] = None,
548
+ position_ids: Optional[torch.LongTensor] = None,
549
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
550
+ inputs_embeds: Optional[torch.FloatTensor] = None,
551
+ use_cache: Optional[bool] = None,
552
+ output_attentions: Optional[bool] = None,
553
+ output_hidden_states: Optional[bool] = None,
554
+ return_dict: Optional[bool] = None,
555
+ cache_position: Optional[torch.LongTensor] = None,
556
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
557
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
558
+ output_hidden_states = (
559
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
560
+ )
561
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
562
+
563
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
564
+
565
+ if (input_ids is None) ^ (inputs_embeds is not None):
566
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
567
+
568
+ if self.gradient_checkpointing and self.training:
569
+ if use_cache:
570
+ logger.warning_once(
571
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
572
+ )
573
+ use_cache = False
574
+
575
+ # kept for BC (non `Cache` `past_key_values` inputs)
576
+ return_legacy_cache = False
577
+ if use_cache and not isinstance(past_key_values, Cache):
578
+ return_legacy_cache = True
579
+ if past_key_values is None:
580
+ past_key_values = DynamicCache()
581
+ else:
582
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
583
+ logger.warning_once(
584
+ "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
585
+ "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
586
+ "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
587
+ )
588
+
589
+ if inputs_embeds is None:
590
+ inputs_embeds = self.embed_tokens(input_ids)
591
+
592
+ if cache_position is None:
593
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
594
+ cache_position = torch.arange(
595
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
596
+ )
597
+ if position_ids is None:
598
+ position_ids = cache_position.unsqueeze(0)
599
+
600
+ causal_mask = self._update_causal_mask(
601
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
602
+ )
603
+
604
+ hidden_states = inputs_embeds
605
+
606
+ # create position embeddings to be shared across the decoder layers
607
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
608
+
609
+ # decoder layers
610
+ all_hidden_states = () if output_hidden_states else None
611
+ all_self_attns = () if output_attentions else None
612
+ next_decoder_cache = None
613
+
614
+ for decoder_layer in self.layers:
615
+ if output_hidden_states:
616
+ all_hidden_states += (hidden_states,)
617
+
618
+ if self.gradient_checkpointing and self.training:
619
+ layer_outputs = self._gradient_checkpointing_func(
620
+ decoder_layer.__call__,
621
+ hidden_states,
622
+ causal_mask,
623
+ position_ids,
624
+ past_key_values,
625
+ output_attentions,
626
+ use_cache,
627
+ cache_position,
628
+ position_embeddings,
629
+ )
630
+ else:
631
+ layer_outputs = decoder_layer(
632
+ hidden_states,
633
+ attention_mask=causal_mask,
634
+ position_ids=position_ids,
635
+ past_key_value=past_key_values,
636
+ output_attentions=output_attentions,
637
+ use_cache=use_cache,
638
+ cache_position=cache_position,
639
+ position_embeddings=position_embeddings,
640
+ )
641
+
642
+ hidden_states = layer_outputs[0]
643
+
644
+ if use_cache:
645
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
646
+
647
+ if output_attentions:
648
+ all_self_attns += (layer_outputs[1],)
649
+
650
+ hidden_states = self.final_layernorm(hidden_states)
651
+
652
+ # add hidden states from the last decoder layer
653
+ if output_hidden_states:
654
+ all_hidden_states += (hidden_states,)
655
+
656
+ next_cache = next_decoder_cache if use_cache else None
657
+ if return_legacy_cache:
658
+ next_cache = next_cache.to_legacy_cache()
659
+
660
+ if not return_dict:
661
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
662
+ return BaseModelOutputWithPast(
663
+ last_hidden_state=hidden_states,
664
+ past_key_values=next_cache,
665
+ hidden_states=all_hidden_states,
666
+ attentions=all_self_attns,
667
+ )
668
+
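Because the model accepts a `Cache` object, incremental decoding can feed only the newest token once a cache exists. Below is a hedged sketch with a tiny, randomly initialised configuration; all config values are made up to keep the example small and no pretrained weights are involved.

```python
# Hedged sketch of incremental decoding against the bare model: after the first
# pass, only the newest token is fed together with the growing DynamicCache.
import torch
from transformers import DynamicCache, PersimmonConfig, PersimmonModel

config = PersimmonConfig(vocab_size=100, hidden_size=32, intermediate_size=64,
                         num_hidden_layers=2, num_attention_heads=4)
model = PersimmonModel(config).eval()

cache = DynamicCache()
prompt = torch.randint(0, 100, (1, 4))
with torch.no_grad():
    out = model(prompt, past_key_values=cache, use_cache=True)
print(out.past_key_values.get_seq_length())  # 4

next_token = torch.randint(0, 100, (1, 1))
with torch.no_grad():
    out = model(next_token, past_key_values=out.past_key_values, use_cache=True)
print(out.past_key_values.get_seq_length())  # 5
```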
669
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
670
+ def _update_causal_mask(
671
+ self,
672
+ attention_mask: torch.Tensor,
673
+ input_tensor: torch.Tensor,
674
+ cache_position: torch.Tensor,
675
+ past_key_values: Cache,
676
+ output_attentions: bool,
677
+ ):
678
+ if self.config._attn_implementation == "flash_attention_2":
679
+ if attention_mask is not None and (attention_mask == 0.0).any():
680
+ return attention_mask
681
+ return None
682
+
683
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
684
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
685
+ # to infer the attention mask.
686
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
687
+ using_static_cache = isinstance(past_key_values, StaticCache)
688
+
689
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
690
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
691
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
692
+ attention_mask,
693
+ inputs_embeds=input_tensor,
694
+ past_key_values_length=past_seen_tokens,
695
+ is_training=self.training,
696
+ ):
697
+ return None
698
+
699
+ dtype, device = input_tensor.dtype, input_tensor.device
700
+ sequence_length = input_tensor.shape[1]
701
+ if using_static_cache:
702
+ target_length = past_key_values.get_max_cache_shape()
703
+ else:
704
+ target_length = (
705
+ attention_mask.shape[-1]
706
+ if isinstance(attention_mask, torch.Tensor)
707
+ else past_seen_tokens + sequence_length + 1
708
+ )
709
+
710
+ # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
711
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
712
+ attention_mask,
713
+ sequence_length=sequence_length,
714
+ target_length=target_length,
715
+ dtype=dtype,
716
+ device=device,
717
+ cache_position=cache_position,
718
+ batch_size=input_tensor.shape[0],
719
+ )
720
+
721
+ if (
722
+ self.config._attn_implementation == "sdpa"
723
+ and attention_mask is not None
724
+ and attention_mask.device.type == "cuda"
725
+ and not output_attentions
726
+ ):
727
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
728
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
729
+ # Details: https://github.com/pytorch/pytorch/issues/110213
730
+ min_dtype = torch.finfo(dtype).min
731
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
732
+
733
+ return causal_mask
734
+
735
+ @staticmethod
736
+ # Copied from transformers.models.llama.modeling_llama.LlamaModel._prepare_4d_causal_attention_mask_with_cache_position
737
+ def _prepare_4d_causal_attention_mask_with_cache_position(
738
+ attention_mask: torch.Tensor,
739
+ sequence_length: int,
740
+ target_length: int,
741
+ dtype: torch.dtype,
742
+ device: torch.device,
743
+ cache_position: torch.Tensor,
744
+ batch_size: int,
745
+ **kwargs,
746
+ ):
747
+ """
748
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
749
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
750
+
751
+ Args:
752
+ attention_mask (`torch.Tensor`):
753
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
754
+ `(batch_size, 1, query_length, key_value_length)`.
755
+ sequence_length (`int`):
756
+ The sequence length being processed.
757
+ target_length (`int`):
758
+ The target length: when generating with static cache, the mask should be as long as the static cache,
759
+ to account for the 0 padding, i.e. the part of the cache that is not filled yet.
760
+ dtype (`torch.dtype`):
761
+ The dtype to use for the 4D attention mask.
762
+ device (`torch.device`):
763
+ The device to place the 4D attention mask on.
764
+ cache_position (`torch.Tensor`):
765
+ Indices depicting the position of the input sequence tokens in the sequence.
766
+ batch_size (`int`):
767
+ Batch size.
768
+ """
769
+ if attention_mask is not None and attention_mask.dim() == 4:
770
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
771
+ causal_mask = attention_mask
772
+ else:
773
+ min_dtype = torch.finfo(dtype).min
774
+ causal_mask = torch.full(
775
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
776
+ )
777
+ if sequence_length != 1:
778
+ causal_mask = torch.triu(causal_mask, diagonal=1)
779
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
780
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
781
+ if attention_mask is not None:
782
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
783
+ mask_length = attention_mask.shape[-1]
784
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
785
+ padding_mask = padding_mask == 0
786
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
787
+ padding_mask, min_dtype
788
+ )
789
+
790
+ return causal_mask
791
+
792
+
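The helper above expands a 2D padding mask into a 4D additive mask that combines causality with padding. A stripped-down sketch of the same construction, without any cache handling, is shown below; `toy_causal_mask` is an illustrative name, not a library function.

```python
# Minimal sketch: turn a 2D padding mask into a 4D additive causal mask.
# Attendable positions get 0.0, everything else a large negative value.
import torch

def toy_causal_mask(padding_mask: torch.Tensor, dtype=torch.float32):
    batch_size, seq_len = padding_mask.shape
    min_value = torch.finfo(dtype).min
    # upper-triangular part (future tokens) is masked
    causal = torch.full((seq_len, seq_len), min_value, dtype=dtype)
    causal = torch.triu(causal, diagonal=1)
    causal = causal[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    # additionally mask padded key positions
    causal = causal.masked_fill(padding_mask[:, None, None, :] == 0, min_value)
    return causal

mask = torch.tensor([[1, 1, 1, 0]])   # last position is padding
print(toy_causal_mask(mask)[0, 0])
```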
793
+ class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin):
794
+ _tied_weights_keys = ["lm_head.weight"]
795
+
796
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with LLAMA->PERSIMMON,Llama->Persimmon
797
+ def __init__(self, config):
798
+ super().__init__(config)
799
+ self.model = PersimmonModel(config)
800
+ self.vocab_size = config.vocab_size
801
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
802
+
803
+ # Initialize weights and apply final processing
804
+ self.post_init()
805
+
806
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
807
+ def get_input_embeddings(self):
808
+ return self.model.embed_tokens
809
+
810
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
811
+ def set_input_embeddings(self, value):
812
+ self.model.embed_tokens = value
813
+
814
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
815
+ def get_output_embeddings(self):
816
+ return self.lm_head
817
+
818
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
819
+ def set_output_embeddings(self, new_embeddings):
820
+ self.lm_head = new_embeddings
821
+
822
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
823
+ def set_decoder(self, decoder):
824
+ self.model = decoder
825
+
826
+ # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
827
+ def get_decoder(self):
828
+ return self.model
829
+
830
+ @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
831
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
832
+ def forward(
833
+ self,
834
+ input_ids: torch.LongTensor = None,
835
+ attention_mask: Optional[torch.Tensor] = None,
836
+ position_ids: Optional[torch.LongTensor] = None,
837
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
838
+ inputs_embeds: Optional[torch.FloatTensor] = None,
839
+ labels: Optional[torch.LongTensor] = None,
840
+ use_cache: Optional[bool] = None,
841
+ output_attentions: Optional[bool] = None,
842
+ output_hidden_states: Optional[bool] = None,
843
+ return_dict: Optional[bool] = None,
844
+ cache_position: Optional[torch.LongTensor] = None,
845
+ num_logits_to_keep: int = 0,
846
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
847
+ r"""
848
+ Args:
849
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
850
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
851
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
852
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
853
+
854
+ num_logits_to_keep (`int`, *optional*):
855
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
856
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
857
+ token can save a significant amount of memory for long sequences or large vocabulary sizes.
858
+
859
+ Returns:
860
+
861
+ Example:
862
+
863
+ ```python
864
+ >>> from transformers import AutoTokenizer, PersimmonForCausalLM
865
+
866
+ >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
867
+ >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")
868
+
869
+ >>> prompt = "human: Hey, what should I eat for dinner?"
870
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
871
+
872
+ >>> # Generate
873
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
874
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
875
+ 'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
876
+ ```"""
877
+
878
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
879
+ output_hidden_states = (
880
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
881
+ )
882
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
883
+
884
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
885
+ outputs = self.model(
886
+ input_ids=input_ids,
887
+ attention_mask=attention_mask,
888
+ position_ids=position_ids,
889
+ past_key_values=past_key_values,
890
+ inputs_embeds=inputs_embeds,
891
+ use_cache=use_cache,
892
+ output_attentions=output_attentions,
893
+ output_hidden_states=output_hidden_states,
894
+ return_dict=return_dict,
895
+ cache_position=cache_position,
896
+ )
897
+
898
+ hidden_states = outputs[0]
899
+ # No upscaling to float was ever done for Persimmon
900
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
901
+
902
+ loss = None
903
+ if labels is not None:
904
+ # Shift so that tokens < n predict n
905
+ shift_logits = logits[..., :-1, :].contiguous()
906
+ shift_labels = labels[..., 1:].contiguous()
907
+ # Flatten the tokens
908
+ loss_fct = CrossEntropyLoss()
909
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
910
+ shift_labels = shift_labels.view(-1)
911
+ # Enable model parallelism
912
+ shift_labels = shift_labels.to(shift_logits.device)
913
+ loss = loss_fct(shift_logits, shift_labels)
914
+
915
+ if not return_dict:
916
+ output = (logits,) + outputs[1:]
917
+ return (loss,) + output if loss is not None else output
918
+
919
+ return CausalLMOutputWithPast(
920
+ loss=loss,
921
+ logits=logits,
922
+ past_key_values=outputs.past_key_values,
923
+ hidden_states=outputs.hidden_states,
924
+ attentions=outputs.attentions,
925
+ )
926
+
927
+
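The loss computation above shifts logits and labels by one position, so the prediction at position `t` is scored against the token at position `t + 1`. A tiny worked sketch of that shift-and-flatten step on random tensors:

```python
# Tiny sketch of the shift-and-flatten causal LM loss.
import torch
from torch.nn import CrossEntropyLoss

vocab_size, seq_len = 10, 5
logits = torch.randn(1, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (1, seq_len))

shift_logits = logits[..., :-1, :].contiguous()   # drop the last position
shift_labels = labels[..., 1:].contiguous()       # drop the first token

loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.shape)  # scalar: torch.Size([])
```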
928
+ @add_start_docstrings(
929
+ """
930
+ The Persimmon transformer with a sequence classification head on top (linear layer).
931
+
932
+ [`PersimmonForSequenceClassification`] uses the last token in order to do the classification, as other causal
933
+ models (e.g. GPT-2) do.
934
+
935
+ Since it does classification on the last token, it requires to know the position of the last token. If a
936
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
937
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
938
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
939
+ each row of the batch).
940
+ """,
941
+ PERSIMMON_START_DOCSTRING,
942
+ )
943
+ # Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with LLAMA->PERSIMMON,Llama->Persimmon
944
+ class PersimmonForSequenceClassification(PersimmonPreTrainedModel):
945
+ def __init__(self, config):
946
+ super().__init__(config)
947
+ self.num_labels = config.num_labels
948
+ self.model = PersimmonModel(config)
949
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
950
+
951
+ # Initialize weights and apply final processing
952
+ self.post_init()
953
+
954
+ def get_input_embeddings(self):
955
+ return self.model.embed_tokens
956
+
957
+ def set_input_embeddings(self, value):
958
+ self.model.embed_tokens = value
959
+
960
+ @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
961
+ def forward(
962
+ self,
963
+ input_ids: Optional[torch.LongTensor] = None,
964
+ attention_mask: Optional[torch.Tensor] = None,
965
+ position_ids: Optional[torch.LongTensor] = None,
966
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
967
+ inputs_embeds: Optional[torch.FloatTensor] = None,
968
+ labels: Optional[torch.LongTensor] = None,
969
+ use_cache: Optional[bool] = None,
970
+ output_attentions: Optional[bool] = None,
971
+ output_hidden_states: Optional[bool] = None,
972
+ return_dict: Optional[bool] = None,
973
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
974
+ r"""
975
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
976
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
977
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
978
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
979
+ """
980
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
981
+
982
+ transformer_outputs = self.model(
983
+ input_ids,
984
+ attention_mask=attention_mask,
985
+ position_ids=position_ids,
986
+ past_key_values=past_key_values,
987
+ inputs_embeds=inputs_embeds,
988
+ use_cache=use_cache,
989
+ output_attentions=output_attentions,
990
+ output_hidden_states=output_hidden_states,
991
+ return_dict=return_dict,
992
+ )
993
+ hidden_states = transformer_outputs[0]
994
+ logits = self.score(hidden_states)
995
+
996
+ if input_ids is not None:
997
+ batch_size = input_ids.shape[0]
998
+ else:
999
+ batch_size = inputs_embeds.shape[0]
1000
+
1001
+ if self.config.pad_token_id is None and batch_size != 1:
1002
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1003
+ if self.config.pad_token_id is None:
1004
+ sequence_lengths = -1
1005
+ else:
1006
+ if input_ids is not None:
1007
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1008
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1009
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1010
+ sequence_lengths = sequence_lengths.to(logits.device)
1011
+ else:
1012
+ sequence_lengths = -1
1013
+
1014
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1015
+
1016
+ loss = None
1017
+ if labels is not None:
1018
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1019
+
1020
+ if not return_dict:
1021
+ output = (pooled_logits,) + transformer_outputs[1:]
1022
+ return ((loss,) + output) if loss is not None else output
1023
+
1024
+ return SequenceClassifierOutputWithPast(
1025
+ loss=loss,
1026
+ logits=pooled_logits,
1027
+ past_key_values=transformer_outputs.past_key_values,
1028
+ hidden_states=transformer_outputs.hidden_states,
1029
+ attentions=transformer_outputs.attentions,
1030
+ )
1031
+
1032
+
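The pooling above picks the logits of the last non-padding token in each row: `argmax` finds the first pad token, the subtraction steps back by one, and the modulo wraps rows without padding around to the last position. A toy illustration (the `pad_token_id` value here is an assumption for the example):

```python
# Toy illustration of locating the last non-padding token per row,
# matching the argmax/modulo trick used above (pad_token_id assumed to be 0).
import torch

pad_token_id = 0
input_ids = torch.tensor([
    [5, 6, 7, 0, 0],   # padded: last real token at index 2
    [5, 6, 7, 8, 9],   # no padding: last real token at index 4
])

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])
```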
1033
+ @add_start_docstrings(
1034
+ """
1035
+ The Persimmon Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1036
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1037
+ """,
1038
+ PERSIMMON_START_DOCSTRING,
1039
+ )
1040
+ # Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Persimmon, LLAMA->PERSIMMON
1041
+ class PersimmonForTokenClassification(PersimmonPreTrainedModel):
1042
+ def __init__(self, config):
1043
+ super().__init__(config)
1044
+ self.num_labels = config.num_labels
1045
+ self.model = PersimmonModel(config)
1046
+ if getattr(config, "classifier_dropout", None) is not None:
1047
+ classifier_dropout = config.classifier_dropout
1048
+ elif getattr(config, "hidden_dropout", None) is not None:
1049
+ classifier_dropout = config.hidden_dropout
1050
+ else:
1051
+ classifier_dropout = 0.1
1052
+ self.dropout = nn.Dropout(classifier_dropout)
1053
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1054
+
1055
+ # Initialize weights and apply final processing
1056
+ self.post_init()
1057
+
1058
+ def get_input_embeddings(self):
1059
+ return self.model.embed_tokens
1060
+
1061
+ def set_input_embeddings(self, value):
1062
+ self.model.embed_tokens = value
1063
+
1064
+ @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
1065
+ @add_code_sample_docstrings(
1066
+ checkpoint=_CHECKPOINT_FOR_DOC,
1067
+ output_type=TokenClassifierOutput,
1068
+ config_class=_CONFIG_FOR_DOC,
1069
+ )
1070
+ def forward(
1071
+ self,
1072
+ input_ids: Optional[torch.LongTensor] = None,
1073
+ attention_mask: Optional[torch.Tensor] = None,
1074
+ position_ids: Optional[torch.LongTensor] = None,
1075
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1076
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1077
+ labels: Optional[torch.LongTensor] = None,
1078
+ use_cache: Optional[bool] = None,
1079
+ output_attentions: Optional[bool] = None,
1080
+ output_hidden_states: Optional[bool] = None,
1081
+ return_dict: Optional[bool] = None,
1082
+ ) -> Union[Tuple, TokenClassifierOutput]:
1083
+ r"""
1084
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1085
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1086
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1087
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1088
+ """
1089
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1090
+
1091
+ outputs = self.model(
1092
+ input_ids,
1093
+ attention_mask=attention_mask,
1094
+ position_ids=position_ids,
1095
+ past_key_values=past_key_values,
1096
+ inputs_embeds=inputs_embeds,
1097
+ use_cache=use_cache,
1098
+ output_attentions=output_attentions,
1099
+ output_hidden_states=output_hidden_states,
1100
+ return_dict=return_dict,
1101
+ )
1102
+ sequence_output = outputs[0]
1103
+ sequence_output = self.dropout(sequence_output)
1104
+ logits = self.score(sequence_output)
1105
+
1106
+ loss = None
1107
+ if labels is not None:
1108
+ loss = self.loss_function(logits, labels, self.config)
1109
+
1110
+ if not return_dict:
1111
+ output = (logits,) + outputs[2:]
1112
+ return ((loss,) + output) if loss is not None else output
1113
+
1114
+ return TokenClassifierOutput(
1115
+ loss=loss,
1116
+ logits=logits,
1117
+ hidden_states=outputs.hidden_states,
1118
+ attentions=outputs.attentions,
1119
+ )
1120
+
1121
+
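For completeness, a hedged usage sketch of the token-classification head with a tiny, randomly initialised configuration; all config values are made up to keep the model small, and a real use case would load a fine-tuned checkpoint instead.

```python
# Hedged sketch: a tiny randomly initialised Persimmon token-classification model.
import torch
from transformers import PersimmonConfig, PersimmonForTokenClassification

config = PersimmonConfig(
    vocab_size=100, hidden_size=32, intermediate_size=64,
    num_hidden_layers=2, num_attention_heads=4, num_labels=3,
)
model = PersimmonForTokenClassification(config).eval()

input_ids = torch.randint(0, 100, (1, 6))
with torch.no_grad():
    logits = model(input_ids).logits
print(logits.shape)  # torch.Size([1, 6, 3]) -- one label distribution per token
```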
1122
+ __all__ = [
1123
+ "PersimmonForCausalLM",
1124
+ "PersimmonModel",
1125
+ "PersimmonPreTrainedModel",
1126
+ "PersimmonForSequenceClassification",
1127
+ "PersimmonForTokenClassification",
1128
+ ]
janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (647 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/configuration_pixtral.cpython-310.pyc ADDED
Binary file (3.68 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/pixtral/__pycache__/image_processing_pixtral.cpython-310.pyc ADDED
Binary file (19 kB). View file