ZTWHHH committed
Commit fa674f4 · verified · 1 parent: febb5fa

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. infer_4_33_0/bin/python +3 -0
  3. janus/lib/python3.10/site-packages/transformers/models/big_bird/__init__.py +30 -0
  4. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/__init__.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/configuration_big_bird.cpython-310.pyc +0 -0
  6. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_big_bird.cpython-310.pyc +0 -0
  7. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_flax_big_bird.cpython-310.pyc +0 -0
  8. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird.cpython-310.pyc +0 -0
  9. janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird_fast.cpython-310.pyc +0 -0
  10. janus/lib/python3.10/site-packages/transformers/models/big_bird/configuration_big_bird.py +176 -0
  11. janus/lib/python3.10/site-packages/transformers/models/big_bird/modeling_big_bird.py +0 -0
  12. janus/lib/python3.10/site-packages/transformers/models/big_bird/modeling_flax_big_bird.py +0 -0
  13. janus/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird.py +324 -0
  14. janus/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird_fast.py +232 -0
  15. janus/lib/python3.10/site-packages/transformers/models/bros/__init__.py +28 -0
  16. janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc +0 -0
  17. janus/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py +28 -0
  18. janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc +0 -0
  19. janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc +0 -0
  20. janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc +0 -0
  21. janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc +0 -0
  22. janus/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py +118 -0
  23. janus/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py +574 -0
  24. janus/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py +683 -0
  25. janus/lib/python3.10/site-packages/transformers/models/emu3/__init__.py +29 -0
  26. janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/configuration_emu3.cpython-310.pyc +0 -0
  27. janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/image_processing_emu3.cpython-310.pyc +0 -0
  28. janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/modular_emu3.cpython-310.pyc +0 -0
  29. janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/processing_emu3.cpython-310.pyc +0 -0
  30. janus/lib/python3.10/site-packages/transformers/models/emu3/configuration_emu3.py +327 -0
  31. janus/lib/python3.10/site-packages/transformers/models/emu3/image_processing_emu3.py +552 -0
  32. janus/lib/python3.10/site-packages/transformers/models/emu3/modeling_emu3.py +1949 -0
  33. janus/lib/python3.10/site-packages/transformers/models/emu3/modular_emu3.py +1270 -0
  34. janus/lib/python3.10/site-packages/transformers/models/emu3/processing_emu3.py +217 -0
  35. janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/configuration_lilt.cpython-310.pyc +0 -0
  36. janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/modeling_lilt.cpython-310.pyc +0 -0
  37. janus/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py +30 -0
  38. janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc +0 -0
  39. janus/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py +169 -0
  40. janus/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py +1461 -0
  41. janus/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py +1661 -0
  42. janus/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py +511 -0
  43. janus/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py +172 -0
  44. janus/lib/python3.10/site-packages/transformers/models/mamba2/__init__.py +27 -0
  45. janus/lib/python3.10/site-packages/transformers/models/mamba2/__pycache__/__init__.cpython-310.pyc +0 -0
  46. janus/lib/python3.10/site-packages/transformers/models/mvp/__init__.py +29 -0
  47. janus/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/configuration_mvp.cpython-310.pyc +0 -0
  48. janus/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp_fast.cpython-310.pyc +0 -0
  49. janus/lib/python3.10/site-packages/transformers/models/mvp/configuration_mvp.py +183 -0
  50. janus/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp.py +394 -0
.gitattributes CHANGED
@@ -442,3 +442,4 @@ janus/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
 janus/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 janus/lib/python3.10/site-packages/transformers/models/oneformer/__pycache__/modeling_oneformer.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 janus/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+infer_4_33_0/bin/python filter=lfs diff=lfs merge=lfs -text
infer_4_33_0/bin/python ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd12d9a162d0964191f823f51c251b1e50da59e8fd71c709a8a4e7ecdeee3d36
+size 17225608
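Note: the three added lines are a Git LFS pointer, not the interpreter binary itself; the ~17 MB object lives in LFS storage and is resolved on checkout. As a rough illustration (not part of this commit, and `parse_lfs_pointer` is a hypothetical helper), such a pointer file can be read like this:

```python
# Illustrative sketch: read a Git LFS pointer file (e.g. infer_4_33_0/bin/python
# as stored in the repository) into its version/oid/size fields.
def parse_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # Expected keys: "version", "oid" (e.g. "sha256:..."), "size" (bytes as a string)
    return fields
```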
janus/lib/python3.10/site-packages/transformers/models/big_bird/__init__.py ADDED
@@ -0,0 +1,30 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_big_bird import *
+    from .modeling_big_bird import *
+    from .modeling_flax_big_bird import *
+    from .tokenization_big_bird import *
+    from .tokenization_big_bird_fast import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
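This `__init__.py` swaps the package module for a `_LazyModule`, so heavy submodules such as `modeling_big_bird` are only imported when one of their names is first accessed. A minimal sketch of the same idea using only the standard library (this mirrors the pattern, it is not the transformers implementation, and the name map is illustrative):

```python
# Minimal PEP 562-style lazy import sketch: defer submodule imports until an
# attribute is actually requested from the package.
import importlib

# Map public names to the submodule that defines them (names taken from the imports above).
_IMPORT_MAP = {
    "BigBirdConfig": ".configuration_big_bird",
    "BigBirdTokenizer": ".tokenization_big_bird",
}


def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```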
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (648 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/configuration_big_bird.cpython-310.pyc ADDED
Binary file (6.96 kB).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_big_bird.cpython-310.pyc ADDED
Binary file (83 kB).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/modeling_flax_big_bird.cpython-310.pyc ADDED
Binary file (63.1 kB).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird.cpython-310.pyc ADDED
Binary file (11.8 kB).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/__pycache__/tokenization_big_bird_fast.cpython-310.pyc ADDED
Binary file (8.95 kB).
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/configuration_big_bird.py ADDED
@@ -0,0 +1,176 @@
+# coding=utf-8
+# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""BigBird model configuration"""
+
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class BigBirdConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BigBirdModel`]. It is used to instantiate an
+    BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the BigBird
+    [google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 50358):
+            Vocabulary size of the BigBird model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`BigBirdModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimension of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention probabilities.
+        max_position_embeddings (`int`, *optional*, defaults to 4096):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 1024 or 2048 or 4096).
+        type_vocab_size (`int`, *optional*, defaults to 2):
+            The vocabulary size of the `token_type_ids` passed when calling [`BigBirdModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        is_decoder (`bool`, *optional*, defaults to `False`):
+            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        attention_type (`str`, *optional*, defaults to `"block_sparse"`)
+            Whether to use block sparse attention (with n complexity) as introduced in paper or original attention
+            layer (with n^2 complexity). Possible values are `"original_full"` and `"block_sparse"`.
+        use_bias (`bool`, *optional*, defaults to `True`)
+            Whether to use bias in query, key, value.
+        rescale_embeddings (`bool`, *optional*, defaults to `False`)
+            Whether to rescale embeddings with (hidden_size ** 0.5).
+        block_size (`int`, *optional*, defaults to 64)
+            Size of each block. Useful only when `attention_type == "block_sparse"`.
+        num_random_blocks (`int`, *optional*, defaults to 3)
+            Each query is going to attend these many number of random blocks. Useful only when `attention_type ==
+            "block_sparse"`.
+        classifier_dropout (`float`, *optional*):
+            The dropout ratio for the classification head.
+
+    Example:
+
+    ```python
+    >>> from transformers import BigBirdConfig, BigBirdModel
+
+    >>> # Initializing a BigBird google/bigbird-roberta-base style configuration
+    >>> configuration = BigBirdConfig()
+
+    >>> # Initializing a model (with random weights) from the google/bigbird-roberta-base style configuration
+    >>> model = BigBirdModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "big_bird"
+
+    def __init__(
+        self,
+        vocab_size=50358,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu_new",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=4096,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        use_cache=True,
+        pad_token_id=0,
+        bos_token_id=1,
+        eos_token_id=2,
+        sep_token_id=66,
+        attention_type="block_sparse",
+        use_bias=True,
+        rescale_embeddings=False,
+        block_size=64,
+        num_random_blocks=3,
+        classifier_dropout=None,
+        **kwargs,
+    ):
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            sep_token_id=sep_token_id,
+            **kwargs,
+        )
+
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.initializer_range = initializer_range
+        self.type_vocab_size = type_vocab_size
+        self.layer_norm_eps = layer_norm_eps
+        self.use_cache = use_cache
+
+        self.rescale_embeddings = rescale_embeddings
+        self.attention_type = attention_type
+        self.use_bias = use_bias
+        self.block_size = block_size
+        self.num_random_blocks = num_random_blocks
+        self.classifier_dropout = classifier_dropout
+
+
+class BigBirdOnnxConfig(OnnxConfig):
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        if self.task == "multiple-choice":
+            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+        else:
+            dynamic_axis = {0: "batch", 1: "sequence"}
+        return OrderedDict(
+            [
+                ("input_ids", dynamic_axis),
+                ("attention_mask", dynamic_axis),
+            ]
+        )
+
+
+__all__ = ["BigBirdConfig", "BigBirdOnnxConfig"]
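A short usage sketch for the two classes added above (run against an environment with this transformers tree installed; the snippet is illustrative, not part of the diff):

```python
# Sketch: build a BigBird configuration with full (quadratic) attention instead
# of the default block-sparse pattern, then inspect the ONNX dynamic axes.
from transformers import BigBirdConfig
from transformers.models.big_bird.configuration_big_bird import BigBirdOnnxConfig

config = BigBirdConfig(attention_type="original_full", block_size=64)
print(config.max_position_embeddings)  # 4096 by default

onnx_config = BigBirdOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes declared in `inputs` above
```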
janus/lib/python3.10/site-packages/transformers/models/big_bird/modeling_big_bird.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/modeling_flax_big_bird.py ADDED
The diff for this file is too large to render. See raw diff
 
janus/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird.py ADDED
@@ -0,0 +1,324 @@
+# coding=utf-8
+# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for BigBird."""
+
+import os
+import re
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
+
+
+class BigBirdTokenizer(PreTrainedTokenizer):
+    """
+    Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+    this superclass for more information regarding those methods.
+
+    Args:
+        vocab_file (`str`):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+            contains the vocabulary necessary to instantiate a tokenizer.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The begin of sequence token.
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+            sequence classification or for a text and a question for question answering. It is also used as the last
+            token of a sequence built with special tokens.
+        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+            The token used for masking values. This is the token used when training this model with masked language
+            modeling. This is the token which the model will try to predict.
+        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+            The classifier token which is used when doing sequence classification (classification of the whole sequence
+            instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        sp_model_kwargs (`dict`, *optional*):
+            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+            to set:
+
+            - `enable_sampling`: Enable subword regularization.
+            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+              - `nbest_size = {0,1}`: No sampling is performed.
+              - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+                using forward-filtering-and-backward-sampling algorithm.
+
+            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+              BPE-dropout.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    model_input_names = ["input_ids", "attention_mask"]
+    prefix_tokens: List[int] = []
+
+    def __init__(
+        self,
+        vocab_file,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        sep_token="[SEP]",
+        mask_token="[MASK]",
+        cls_token="[CLS]",
+        sp_model_kwargs: Optional[Dict[str, Any]] = None,
+        **kwargs,
+    ) -> None:
+        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+
+        # Mask token behave like a normal word, i.e. include the space before it
+        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+        self.vocab_file = vocab_file
+
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(vocab_file)
+
+        super().__init__(
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            pad_token=pad_token,
+            sep_token=sep_token,
+            mask_token=mask_token,
+            cls_token=cls_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            **kwargs,
+        )
+
+    @property
+    def vocab_size(self):
+        return self.sp_model.get_piece_size()
+
+    def get_vocab(self):
+        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+        vocab.update(self.added_tokens_encoder)
+        return vocab
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+
+        # for backward compatibility
+        if not hasattr(self, "sp_model_kwargs"):
+            self.sp_model_kwargs = {}
+
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.Load(self.vocab_file)
+
+    def _tokenize(self, text: str) -> List[str]:
+        """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+        return self.sp_model.encode(text, out_type=str)
+
+    def _convert_token_to_id(self, token):
+        """Converts a token (str) in an id using the vocab."""
+        return self.sp_model.piece_to_id(token)
+
+    def _convert_id_to_token(self, index):
+        """Converts an index (integer) in a token (str) using the vocab."""
+        token = self.sp_model.IdToPiece(index)
+        return token
+
+    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string
+    def convert_tokens_to_string(self, tokens):
+        """Converts a sequence of tokens (string) in a single string."""
+        current_sub_tokens = []
+        out_string = ""
+        prev_is_special = False
+        for token in tokens:
+            # make sure that special tokens are not decoded using sentencepiece model
+            if token in self.all_special_tokens:
+                if not prev_is_special:
+                    out_string += " "
+                out_string += self.sp_model.decode(current_sub_tokens) + token
+                prev_is_special = True
+                current_sub_tokens = []
+            else:
+                current_sub_tokens.append(token)
+                prev_is_special = False
+        out_string += self.sp_model.decode(current_sub_tokens)
+        return out_string.strip()
+
+    def _decode(
+        self,
+        token_ids: List[int],
+        skip_special_tokens: bool = False,
+        clean_up_tokenization_spaces: bool = None,
+        spaces_between_special_tokens: bool = True,
+        **kwargs,
+    ) -> str:
+        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
+
+        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
+
+        # To avoid mixing byte-level and unicode for byte-level BPT
+        # we need to build string separately for added tokens and byte-level tokens
+        # cf. https://github.com/huggingface/transformers/issues/1133
+        sub_texts = []
+        current_sub_text = []
+        for token in filtered_tokens:
+            if skip_special_tokens and token in self.all_special_ids:
+                continue
+            if token in self.added_tokens_encoder:
+                if current_sub_text:
+                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+                    current_sub_text = []
+                sub_texts.append(token)
+            else:
+                current_sub_text.append(token)
+        if current_sub_text:
+            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
+
+        # Mimic the behavior of the Rust tokenizer:
+        # No space before [MASK] and [SEP]
+        if spaces_between_special_tokens:
+            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
+        else:
+            text = "".join(sub_texts)
+
+        clean_up_tokenization_spaces = (
+            clean_up_tokenization_spaces
+            if clean_up_tokenization_spaces is not None
+            else self.clean_up_tokenization_spaces
+        )
+        if clean_up_tokenization_spaces:
+            clean_text = self.clean_up_tokenization(text)
+            return clean_text
+        else:
+            return text
+
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+        elif not os.path.isfile(self.vocab_file):
+            with open(out_vocab_file, "wb") as fi:
+                content_spiece_model = self.sp_model.serialized_model_proto()
+                fi.write(content_spiece_model)
+
+        return (out_vocab_file,)
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+        adding special tokens. A Big Bird sequence has the following format:
+
+        - single sequence: `[CLS] X [SEP]`
+        - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs to which the special tokens will be added.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+        """
+        if token_ids_1 is None:
+            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+        cls = [self.cls_token_id]
+        sep = [self.sep_token_id]
+        return cls + token_ids_0 + sep + token_ids_1 + sep
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Whether or not the token list is already formatted with special tokens for the model.
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+        if already_has_special_tokens:
+            return super().get_special_tokens_mask(
+                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+            )
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+        pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second
+        sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        sep = [self.sep_token_id]
+        cls = [self.cls_token_id]
+        if token_ids_1 is None:
+            return len(cls + token_ids_0 + sep) * [0]
+        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+
+__all__ = ["BigBirdTokenizer"]
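A usage sketch for the special-token helpers defined above, assuming the `google/bigbird-roberta-base` checkpoint (and its SentencePiece model) can be fetched from the Hub; not part of the diff:

```python
# Sketch: exercise build_inputs_with_special_tokens and the related helpers.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Paris is the capital of France."))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("What is the capital of France?"))

# [CLS] A [SEP] B [SEP], as documented above
input_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
special_mask = tokenizer.get_special_tokens_mask(ids_a, ids_b)

assert len(input_ids) == len(token_type_ids) == len(special_mask)
```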
janus/lib/python3.10/site-packages/transformers/models/big_bird/tokenization_big_bird_fast.py ADDED
@@ -0,0 +1,232 @@
+# coding=utf-8
+# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for Big Bird model."""
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+    from .tokenization_big_bird import BigBirdTokenizer
+else:
+    BigBirdTokenizer = None
+
+logger = logging.get_logger(__name__)
+VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
+
+
+SPIECE_UNDERLINE = "▁"
+
+
+class BigBirdTokenizerFast(PreTrainedTokenizerFast):
+    """
+    Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library). Based on
+    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This
+    tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to
+    this superclass for more information regarding those methods
+
+    Args:
+        vocab_file (`str`):
+            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+            contains the vocabulary necessary to instantiate a tokenizer.
+        bos_token (`str`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
+
+            <Tip>
+
+            When building a sequence using special tokens, this is not the token that is used for the beginning of
+            sequence. The token used is the `cls_token`.
+
+            </Tip>
+
+        eos_token (`str`, *optional*, defaults to `"</s>"`):
+            The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token
+            that is used for the end of sequence. The token used is the `sep_token`.
+        unk_token (`str`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+            sequence classification or for a text and a question for question answering. It is also used as the last
+            token of a sequence built with special tokens.
+        pad_token (`str`, *optional*, defaults to `"<pad>"`):
+            The token used for padding, for example when batching sequences of different lengths.
+        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+            The classifier token which is used when doing sequence classification (classification of the whole sequence
+            instead of per-token classification). It is the first token of the sequence when built with special tokens.
+        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+            The token used for masking values. This is the token used when training this model with masked language
+            modeling. This is the token which the model will try to predict.
+    """
+
+    vocab_files_names = VOCAB_FILES_NAMES
+    slow_tokenizer_class = BigBirdTokenizer
+    model_input_names = ["input_ids", "attention_mask"]
+    prefix_tokens: List[int] = []
+
+    def __init__(
+        self,
+        vocab_file=None,
+        tokenizer_file=None,
+        unk_token="<unk>",
+        bos_token="<s>",
+        eos_token="</s>",
+        pad_token="<pad>",
+        sep_token="[SEP]",
+        mask_token="[MASK]",
+        cls_token="[CLS]",
+        **kwargs,
+    ):
+        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+
+        # Mask token behave like a normal word, i.e. include the space before it
+        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+        super().__init__(
+            vocab_file,
+            tokenizer_file=tokenizer_file,
+            bos_token=bos_token,
+            eos_token=eos_token,
+            unk_token=unk_token,
+            sep_token=sep_token,
+            pad_token=pad_token,
+            cls_token=cls_token,
+            mask_token=mask_token,
+            **kwargs,
+        )
+
+        self.vocab_file = vocab_file
+
+    @property
+    def can_save_slow_tokenizer(self) -> bool:
+        return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+    def build_inputs_with_special_tokens(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+        adding special tokens. An BigBird sequence has the following format:
+
+        - single sequence: `[CLS] X [SEP]`
+        - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of IDs to which the special tokens will be added
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+        """
+        sep = [self.sep_token_id]
+        cls = [self.cls_token_id]
+        if token_ids_1 is None:
+            return cls + token_ids_0 + sep
+        return cls + token_ids_0 + sep + token_ids_1 + sep
+
+    def get_special_tokens_mask(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+    ) -> List[int]:
+        """
+        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+        special tokens using the tokenizer `prepare_for_model` method.
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                Set to True if the token list is already formatted with special tokens for the model
+
+        Returns:
+            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+        """
+
+        if already_has_special_tokens:
+            if token_ids_1 is not None:
+                raise ValueError(
+                    "You should not supply a second sequence if the provided sequence of "
+                    "ids is already formatted with special tokens for the model."
+                )
+            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
+
+        if token_ids_1 is None:
+            return [1] + ([0] * len(token_ids_0)) + [1]
+        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+
+    def create_token_type_ids_from_sequences(
+        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+    ) -> List[int]:
+        """
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+        Args:
+            token_ids_0 (`List[int]`):
+                List of ids.
+            token_ids_1 (`List[int]`, *optional*):
+                Optional second list of IDs for sequence pairs.
+
+        Returns:
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+        """
+        sep = [self.sep_token_id]
+        cls = [self.cls_token_id]
+
+        if token_ids_1 is None:
+            return len(cls + token_ids_0 + sep) * [0]
+        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+        if not self.can_save_slow_tokenizer:
+            raise ValueError(
+                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+                "tokenizer."
+            )
+
+        if not os.path.isdir(save_directory):
+            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+            return
+        out_vocab_file = os.path.join(
+            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+        )
+
+        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+            copyfile(self.vocab_file, out_vocab_file)
+
+        return (out_vocab_file,)
+
+
+__all__ = ["BigBirdTokenizerFast"]
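The segment-id layout computed by `create_token_type_ids_from_sequences` above can be checked with plain Python (the cls/sep ids below are placeholders, not BigBird's real ids):

```python
# Pure-Python illustration of the token_type_ids formula used above.
cls, sep = [65], [66]
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

token_type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1] -> 5 zeros for [CLS] A [SEP], 3 ones for B [SEP]
```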
janus/lib/python3.10/site-packages/transformers/models/bros/__init__.py ADDED
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_bros import *
+    from .modeling_bros import *
+    from .processing_bros import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/bros/__pycache__/processing_bros.cpython-310.pyc ADDED
Binary file (3.58 kB).
 
janus/lib/python3.10/site-packages/transformers/models/convnextv2/__init__.py ADDED
@@ -0,0 +1,28 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_convnextv2 import *
+    from .modeling_convnextv2 import *
+    from .modeling_tf_convnextv2 import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (583 Bytes).
 
janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/configuration_convnextv2.cpython-310.pyc ADDED
Binary file (5.09 kB).
 
janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_convnextv2.cpython-310.pyc ADDED
Binary file (18.7 kB).
 
janus/lib/python3.10/site-packages/transformers/models/convnextv2/__pycache__/modeling_tf_convnextv2.cpython-310.pyc ADDED
Binary file (22.2 kB).
 
janus/lib/python3.10/site-packages/transformers/models/convnextv2/configuration_convnextv2.py ADDED
@@ -0,0 +1,118 @@
+# coding=utf-8
+# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""ConvNeXTV2 model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
+
+
+logger = logging.get_logger(__name__)
+
+
+class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate an
+    ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
+    [facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        num_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        patch_size (`int`, *optional*, defaults to 4):
+            Patch size to use in the patch embedding layer.
+        num_stages (`int`, *optional*, defaults to 4):
+            The number of stages in the model.
+        hidden_sizes (`List[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
+            Dimensionality (hidden size) at each stage.
+        depths (`List[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
+            Depth (number of blocks) for each stage.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
+            `"selu"` and `"gelu_new"` are supported.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        drop_path_rate (`float`, *optional*, defaults to 0.0):
+            The drop rate for stochastic depth.
+        image_size (`int`, *optional*, defaults to 224):
+            The size (resolution) of each image.
+        out_features (`List[str]`, *optional*):
+            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
+            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
+            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+        out_indices (`List[int]`, *optional*):
+            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
+            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
+            If unset and `out_features` is unset, will default to the last stage. Must be in the
+            same order as defined in the `stage_names` attribute.
+
+    Example:
+    ```python
+    >>> from transformers import ConvNeXTV2Config, ConvNextV2Model
+
+    >>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
+    >>> configuration = ConvNeXTV2Config()
+
+    >>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
+    >>> model = ConvNextV2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "convnextv2"
+
+    def __init__(
+        self,
+        num_channels=3,
+        patch_size=4,
+        num_stages=4,
+        hidden_sizes=None,
+        depths=None,
+        hidden_act="gelu",
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        drop_path_rate=0.0,
+        image_size=224,
+        out_features=None,
+        out_indices=None,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.num_stages = num_stages
+        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
+        self.depths = [3, 3, 9, 3] if depths is None else depths
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.drop_path_rate = drop_path_rate
+        self.image_size = image_size
+        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
+        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
+            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
+        )
+
+
+__all__ = ["ConvNextV2Config"]
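A backbone-style usage sketch for the configuration above, exercising the `stage_names` / `out_features` alignment performed in `__init__` (values chosen for illustration; not part of the diff):

```python
# Sketch: request intermediate feature maps by stage name.
from transformers import ConvNextV2Config

config = ConvNextV2Config(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']
print(config.out_indices)   # indices aligned with stage_names, e.g. [2, 4]
```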
janus/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_convnextv2.py ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch ConvNextV2 model."""
16
+
17
+ from typing import Optional, Tuple, Union
18
+
19
+ import torch
20
+ import torch.utils.checkpoint
21
+ from torch import nn
22
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
23
+
24
+ from ...activations import ACT2FN
25
+ from ...modeling_outputs import (
26
+ BackboneOutput,
27
+ BaseModelOutputWithNoAttention,
28
+ BaseModelOutputWithPoolingAndNoAttention,
29
+ ImageClassifierOutputWithNoAttention,
30
+ )
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import (
33
+ add_code_sample_docstrings,
34
+ add_start_docstrings,
35
+ add_start_docstrings_to_model_forward,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from ...utils.backbone_utils import BackboneMixin
40
+ from .configuration_convnextv2 import ConvNextV2Config
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ # General docstring
46
+ _CONFIG_FOR_DOC = "ConvNextV2Config"
47
+
48
+ # Base docstring
49
+ _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
50
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
51
+
52
+ # Image classification docstring
53
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
54
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
55
+
56
+
57
+ # Copied from transformers.models.beit.modeling_beit.drop_path
58
+ def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
59
+ """
60
+ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
61
+
62
+ Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
63
+ however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
64
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
65
+ layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
66
+ argument.
67
+ """
68
+ if drop_prob == 0.0 or not training:
69
+ return input
70
+ keep_prob = 1 - drop_prob
71
+ shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
72
+ random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
73
+ random_tensor.floor_() # binarize
74
+ output = input.div(keep_prob) * random_tensor
75
+ return output
76
+
77
+
78
+ # Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
79
+ class ConvNextV2DropPath(nn.Module):
80
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
81
+
82
+ def __init__(self, drop_prob: Optional[float] = None) -> None:
83
+ super().__init__()
84
+ self.drop_prob = drop_prob
85
+
86
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
87
+ return drop_path(hidden_states, self.drop_prob, self.training)
88
+
89
+ def extra_repr(self) -> str:
90
+ return "p={}".format(self.drop_prob)
91
+
92
+
93
+ class ConvNextV2GRN(nn.Module):
94
+ """GRN (Global Response Normalization) layer"""
95
+
96
+ def __init__(self, dim: int):
97
+ super().__init__()
98
+ self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
99
+ self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))
100
+
101
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
102
+ # Compute and normalize global spatial feature maps
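+ # For NHWC input of shape (batch_size, height, width, channels), `global_features` below has shape
+ # (batch_size, 1, 1, channels) (per-channel L2 norm over the spatial dims), and `norm_features` rescales each
+ # channel by its share of the mean channel norm. Since `weight` and `bias` are initialized to zero, the layer
+ # starts out as an identity mapping through the residual term.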
103
+ global_features = torch.norm(hidden_states, p=2, dim=(1, 2), keepdim=True)
104
+ norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
105
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
106
+
107
+ return hidden_states
108
+
109
+
110
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
111
+ class ConvNextV2LayerNorm(nn.Module):
112
+ r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
113
+ The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
114
+ width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
115
+ """
116
+
117
+ def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
118
+ super().__init__()
119
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
120
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
121
+ self.eps = eps
122
+ self.data_format = data_format
123
+ if self.data_format not in ["channels_last", "channels_first"]:
124
+ raise NotImplementedError(f"Unsupported data format: {self.data_format}")
125
+ self.normalized_shape = (normalized_shape,)
126
+
127
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
128
+ if self.data_format == "channels_last":
129
+ x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
130
+ elif self.data_format == "channels_first":
131
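+ # `torch.nn.functional.layer_norm` normalizes over trailing dimensions, so it cannot be used directly when
+ # the channel dimension comes first; the statistics are therefore computed by hand over dim 1, in float32
+ # for numerical stability under fp16/bf16, and the result is cast back to the input dtype.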
+ input_dtype = x.dtype
132
+ x = x.float()
133
+ u = x.mean(1, keepdim=True)
134
+ s = (x - u).pow(2).mean(1, keepdim=True)
135
+ x = (x - u) / torch.sqrt(s + self.eps)
136
+ x = x.to(dtype=input_dtype)
137
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
138
+ return x
139
+
140
+
141
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
142
+ class ConvNextV2Embeddings(nn.Module):
143
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
144
+ found in src/transformers/models/swin/modeling_swin.py.
145
+ """
146
+
147
+ def __init__(self, config):
148
+ super().__init__()
149
+ self.patch_embeddings = nn.Conv2d(
150
+ config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
151
+ )
152
+ self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
153
+ self.num_channels = config.num_channels
154
+
155
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
156
+ num_channels = pixel_values.shape[1]
157
+ if num_channels != self.num_channels:
158
+ raise ValueError(
159
+ "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
160
+ )
161
+ embeddings = self.patch_embeddings(pixel_values)
162
+ embeddings = self.layernorm(embeddings)
163
+ return embeddings
164
+
165
+
166
+ class ConvNextV2Layer(nn.Module):
167
+ """This corresponds to the `Block` class in the original implementation.
168
+
169
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C,
170
+ H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
171
+
172
+ The authors used (2) as they find it slightly faster in PyTorch.
173
+
174
+ Args:
175
+ config ([`ConvNextV2Config`]): Model configuration class.
176
+ dim (`int`): Number of input channels.
177
+ drop_path (`float`): Stochastic depth rate. Default: 0.0.
178
+ """
179
+
180
+ def __init__(self, config, dim, drop_path=0):
181
+ super().__init__()
182
+ # depthwise conv
183
+ self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
184
+ self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
185
+ # pointwise/1x1 convs, implemented with linear layers
186
+ self.pwconv1 = nn.Linear(dim, 4 * dim)
187
+ self.act = ACT2FN[config.hidden_act]
188
+ self.grn = ConvNextV2GRN(4 * dim)
189
+ self.pwconv2 = nn.Linear(4 * dim, dim)
190
+ self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
191
+
192
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
193
+ input = hidden_states
194
+ x = self.dwconv(hidden_states)
195
+ # (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
196
+ x = x.permute(0, 2, 3, 1)
197
+ x = self.layernorm(x)
198
+ x = self.pwconv1(x)
199
+ x = self.act(x)
200
+ x = self.grn(x)
201
+ x = self.pwconv2(x)
202
+ # (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
203
+ x = x.permute(0, 3, 1, 2)
204
+
205
+ x = input + self.drop_path(x)
206
+ return x
207
+
208
+
209
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
210
+ class ConvNextV2Stage(nn.Module):
211
+ """ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
212
+
213
+ Args:
214
+ config ([`ConvNextV2Config`]): Model configuration class.
215
+ in_channels (`int`): Number of input channels.
216
+ out_channels (`int`): Number of output channels.
217
+ depth (`int`): Number of residual blocks.
218
+ drop_path_rates(`List[float]`): Stochastic depth rates for each layer.
219
+ """
220
+
221
+ def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
222
+ super().__init__()
223
+
224
+ if in_channels != out_channels or stride > 1:
225
+ self.downsampling_layer = nn.Sequential(
226
+ ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
227
+ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
228
+ )
229
+ else:
230
+ self.downsampling_layer = nn.Identity()
231
+ drop_path_rates = drop_path_rates or [0.0] * depth
232
+ self.layers = nn.Sequential(
233
+ *[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
234
+ )
235
+
236
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
237
+ hidden_states = self.downsampling_layer(hidden_states)
238
+ hidden_states = self.layers(hidden_states)
239
+ return hidden_states
240
+
241
+
242
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
243
+ class ConvNextV2Encoder(nn.Module):
244
+ def __init__(self, config):
245
+ super().__init__()
246
+ self.stages = nn.ModuleList()
247
+ drop_path_rates = [
248
+ x.tolist() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths)).split(config.depths)
249
+ ]
250
+ prev_chs = config.hidden_sizes[0]
251
+ for i in range(config.num_stages):
252
+ out_chs = config.hidden_sizes[i]
253
+ stage = ConvNextV2Stage(
254
+ config,
255
+ in_channels=prev_chs,
256
+ out_channels=out_chs,
257
+ stride=2 if i > 0 else 1,
258
+ depth=config.depths[i],
259
+ drop_path_rates=drop_path_rates[i],
260
+ )
261
+ self.stages.append(stage)
262
+ prev_chs = out_chs
263
+
264
+ def forward(
265
+ self,
266
+ hidden_states: torch.FloatTensor,
267
+ output_hidden_states: Optional[bool] = False,
268
+ return_dict: Optional[bool] = True,
269
+ ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
270
+ all_hidden_states = () if output_hidden_states else None
271
+
272
+ for i, layer_module in enumerate(self.stages):
273
+ if output_hidden_states:
274
+ all_hidden_states = all_hidden_states + (hidden_states,)
275
+
276
+ hidden_states = layer_module(hidden_states)
277
+
278
+ if output_hidden_states:
279
+ all_hidden_states = all_hidden_states + (hidden_states,)
280
+
281
+ if not return_dict:
282
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
283
+
284
+ return BaseModelOutputWithNoAttention(
285
+ last_hidden_state=hidden_states,
286
+ hidden_states=all_hidden_states,
287
+ )
288
+
289
+
290
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextPreTrainedModel with ConvNext->ConvNextV2, convnext->convnextv2
291
+ class ConvNextV2PreTrainedModel(PreTrainedModel):
292
+ """
293
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
294
+ models.
295
+ """
296
+
297
+ config_class = ConvNextV2Config
298
+ base_model_prefix = "convnextv2"
299
+ main_input_name = "pixel_values"
300
+ _no_split_modules = ["ConvNextV2Layer"]
301
+
302
+ def _init_weights(self, module):
303
+ """Initialize the weights"""
304
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
305
+ # Slightly different from the TF version which uses truncated_normal for initialization
306
+ # cf https://github.com/pytorch/pytorch/pull/5617
307
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
308
+ if module.bias is not None:
309
+ module.bias.data.zero_()
310
+ elif isinstance(module, nn.LayerNorm):
311
+ module.bias.data.zero_()
312
+ module.weight.data.fill_(1.0)
313
+
314
+
315
+ CONVNEXTV2_START_DOCSTRING = r"""
316
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
317
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
318
+ behavior.
319
+
320
+ Parameters:
321
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
322
+ Initializing with a config file does not load the weights associated with the model, only the
323
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
324
+ """
325
+
326
+ CONVNEXTV2_INPUTS_DOCSTRING = r"""
327
+ Args:
328
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
329
+ Pixel values. Pixel values can be obtained using [`ConvNextImageProcessor`]. See
330
+ [`ConvNextImageProcessor.__call__`] for details.
331
+ output_hidden_states (`bool`, *optional*):
332
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
333
+ more detail.
334
+ return_dict (`bool`, *optional*):
335
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
336
+ """
337
+
338
+
339
+ @add_start_docstrings(
340
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
341
+ CONVNEXTV2_START_DOCSTRING,
342
+ )
343
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
344
+ class ConvNextV2Model(ConvNextV2PreTrainedModel):
345
+ def __init__(self, config):
346
+ super().__init__(config)
347
+ self.config = config
348
+
349
+ self.embeddings = ConvNextV2Embeddings(config)
350
+ self.encoder = ConvNextV2Encoder(config)
351
+
352
+ # final layernorm layer
353
+ self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
354
+
355
+ # Initialize weights and apply final processing
356
+ self.post_init()
357
+
358
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
359
+ @add_code_sample_docstrings(
360
+ checkpoint=_CHECKPOINT_FOR_DOC,
361
+ output_type=BaseModelOutputWithPoolingAndNoAttention,
362
+ config_class=_CONFIG_FOR_DOC,
363
+ modality="vision",
364
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
365
+ )
366
+ def forward(
367
+ self,
368
+ pixel_values: torch.FloatTensor = None,
369
+ output_hidden_states: Optional[bool] = None,
370
+ return_dict: Optional[bool] = None,
371
+ ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
372
+ output_hidden_states = (
373
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
374
+ )
375
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
376
+
377
+ if pixel_values is None:
378
+ raise ValueError("You have to specify pixel_values")
379
+
380
+ embedding_output = self.embeddings(pixel_values)
381
+
382
+ encoder_outputs = self.encoder(
383
+ embedding_output,
384
+ output_hidden_states=output_hidden_states,
385
+ return_dict=return_dict,
386
+ )
387
+
388
+ last_hidden_state = encoder_outputs[0]
389
+
390
+ # global average pooling, (N, C, H, W) -> (N, C)
391
+ pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
392
+
393
+ if not return_dict:
394
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
395
+
396
+ return BaseModelOutputWithPoolingAndNoAttention(
397
+ last_hidden_state=last_hidden_state,
398
+ pooler_output=pooled_output,
399
+ hidden_states=encoder_outputs.hidden_states,
400
+ )
401
+
402
+
403
+ @add_start_docstrings(
404
+ """
405
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
406
+ ImageNet.
407
+ """,
408
+ CONVNEXTV2_START_DOCSTRING,
409
+ )
410
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
411
+ class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
412
+ def __init__(self, config):
413
+ super().__init__(config)
414
+
415
+ self.num_labels = config.num_labels
416
+ self.convnextv2 = ConvNextV2Model(config)
417
+
418
+ # Classifier head
419
+ self.classifier = (
420
+ nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
421
+ )
422
+
423
+ # Initialize weights and apply final processing
424
+ self.post_init()
425
+
426
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
427
+ @add_code_sample_docstrings(
428
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
429
+ output_type=ImageClassifierOutputWithNoAttention,
430
+ config_class=_CONFIG_FOR_DOC,
431
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
432
+ )
433
+ def forward(
434
+ self,
435
+ pixel_values: torch.FloatTensor = None,
436
+ labels: Optional[torch.LongTensor] = None,
437
+ output_hidden_states: Optional[bool] = None,
438
+ return_dict: Optional[bool] = None,
439
+ ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
440
+ r"""
441
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
442
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
443
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
444
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
445
+ """
446
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
447
+
448
+ outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
449
+
450
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
451
+
452
+ logits = self.classifier(pooled_output)
453
+
454
+ loss = None
455
+ if labels is not None:
456
+ if self.config.problem_type is None:
457
+ if self.num_labels == 1:
458
+ self.config.problem_type = "regression"
459
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
460
+ self.config.problem_type = "single_label_classification"
461
+ else:
462
+ self.config.problem_type = "multi_label_classification"
463
+
464
+ if self.config.problem_type == "regression":
465
+ loss_fct = MSELoss()
466
+ if self.num_labels == 1:
467
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
468
+ else:
469
+ loss = loss_fct(logits, labels)
470
+ elif self.config.problem_type == "single_label_classification":
471
+ loss_fct = CrossEntropyLoss()
472
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
473
+ elif self.config.problem_type == "multi_label_classification":
474
+ loss_fct = BCEWithLogitsLoss()
475
+ loss = loss_fct(logits, labels)
476
+ if not return_dict:
477
+ output = (logits,) + outputs[2:]
478
+ return ((loss,) + output) if loss is not None else output
479
+
480
+ return ImageClassifierOutputWithNoAttention(
481
+ loss=loss,
482
+ logits=logits,
483
+ hidden_states=outputs.hidden_states,
484
+ )
485
+
486
+
487
+ @add_start_docstrings(
488
+ """
489
+ ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
490
+ """,
491
+ CONVNEXTV2_START_DOCSTRING,
492
+ )
493
+ # Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
494
+ class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
495
+ def __init__(self, config):
496
+ super().__init__(config)
497
+ super()._init_backbone(config)
498
+
499
+ self.embeddings = ConvNextV2Embeddings(config)
500
+ self.encoder = ConvNextV2Encoder(config)
501
+ self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
502
+
503
+ # Add layer norms to hidden states of out_features
504
+ hidden_states_norms = {}
505
+ for stage, num_channels in zip(self._out_features, self.channels):
506
+ hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
507
+ self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
508
+
509
+ # initialize weights and apply final processing
510
+ self.post_init()
511
+
512
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
513
+ @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
514
+ def forward(
515
+ self,
516
+ pixel_values: torch.Tensor,
517
+ output_hidden_states: Optional[bool] = None,
518
+ return_dict: Optional[bool] = None,
519
+ ) -> BackboneOutput:
520
+ """
521
+ Returns:
522
+
523
+ Examples:
524
+
525
+ ```python
526
+ >>> from transformers import AutoImageProcessor, AutoBackbone
527
+ >>> import torch
528
+ >>> from PIL import Image
529
+ >>> import requests
530
+
531
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
532
+ >>> image = Image.open(requests.get(url, stream=True).raw)
533
+
534
+ >>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
535
+ >>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")
536
+
537
+ >>> inputs = processor(image, return_tensors="pt")
538
+ >>> outputs = model(**inputs)
539
+ ```"""
540
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
541
+ output_hidden_states = (
542
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
543
+ )
544
+
545
+ embedding_output = self.embeddings(pixel_values)
546
+
547
+ outputs = self.encoder(
548
+ embedding_output,
549
+ output_hidden_states=True,
550
+ return_dict=return_dict,
551
+ )
552
+
553
+ hidden_states = outputs.hidden_states if return_dict else outputs[1]
554
+
555
+ feature_maps = ()
556
+ for stage, hidden_state in zip(self.stage_names, hidden_states):
557
+ if stage in self.out_features:
558
+ hidden_state = self.hidden_states_norms[stage](hidden_state)
559
+ feature_maps += (hidden_state,)
560
+
561
+ if not return_dict:
562
+ output = (feature_maps,)
563
+ if output_hidden_states:
564
+ output += (hidden_states,)
565
+ return output
566
+
567
+ return BackboneOutput(
568
+ feature_maps=feature_maps,
569
+ hidden_states=hidden_states if output_hidden_states else None,
570
+ attentions=None,
571
+ )
572
+
573
+
574
+ __all__ = ["ConvNextV2ForImageClassification", "ConvNextV2Model", "ConvNextV2PreTrainedModel", "ConvNextV2Backbone"]
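For reference, a minimal inference sketch against the `facebook/convnextv2-tiny-1k-224` checkpoint named in the docstrings above (assumes network access and reuses the COCO image URL from the backbone example):

```python
import requests
import torch
from PIL import Image

from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) for the ImageNet-1k head

print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"
```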
janus/lib/python3.10/site-packages/transformers/models/convnextv2/modeling_tf_convnextv2.py ADDED
@@ -0,0 +1,683 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """TF 2.0 ConvNextV2 model."""
16
+
17
+ from __future__ import annotations
18
+
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import tensorflow as tf
23
+
24
+ from ...activations_tf import get_tf_activation
25
+ from ...modeling_tf_outputs import (
26
+ TFBaseModelOutputWithNoAttention,
27
+ TFBaseModelOutputWithPooling,
28
+ TFBaseModelOutputWithPoolingAndNoAttention,
29
+ TFImageClassifierOutputWithNoAttention,
30
+ )
31
+ from ...modeling_tf_utils import (
32
+ TFModelInputType,
33
+ TFPreTrainedModel,
34
+ TFSequenceClassificationLoss,
35
+ get_initializer,
36
+ keras,
37
+ keras_serializable,
38
+ unpack_inputs,
39
+ )
40
+ from ...tf_utils import shape_list
41
+ from ...utils import (
42
+ add_code_sample_docstrings,
43
+ add_start_docstrings,
44
+ add_start_docstrings_to_model_forward,
45
+ logging,
46
+ )
47
+ from .configuration_convnextv2 import ConvNextV2Config
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ # General docstring
53
+ _CONFIG_FOR_DOC = "ConvNextV2Config"
54
+
55
+ # Base docstring
56
+ _CHECKPOINT_FOR_DOC = "facebook/convnextv2-tiny-1k-224"
57
+ _EXPECTED_OUTPUT_SHAPE = [1, 768, 7, 7]
58
+
59
+ # Image classification docstring
60
+ _IMAGE_CLASS_CHECKPOINT = "facebook/convnextv2-tiny-1k-224"
61
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
62
+
63
+
64
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextDropPath with ConvNext->ConvNextV2
65
+ class TFConvNextV2DropPath(keras.layers.Layer):
66
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
67
+ References:
68
+ (1) github.com:rwightman/pytorch-image-models
69
+ """
70
+
71
+ def __init__(self, drop_path: float, **kwargs):
72
+ super().__init__(**kwargs)
73
+ self.drop_path = drop_path
74
+
75
+ def call(self, x: tf.Tensor, training=None):
76
+ if training:
77
+ keep_prob = 1 - self.drop_path
78
+ shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
79
+ random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
80
+ random_tensor = tf.floor(random_tensor)
81
+ return (x / keep_prob) * random_tensor
82
+ return x
83
+
84
+
85
+ class TFConvNextV2GRN(keras.layers.Layer):
86
+ """GRN (Global Response Normalization) layer"""
87
+
88
+ def __init__(self, config: ConvNextV2Config, dim: int, **kwargs):
89
+ super().__init__(**kwargs)
90
+ self.dim = dim
91
+
92
+ def build(self, input_shape: tf.TensorShape = None):
93
+ # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)
94
+ self.weight = self.add_weight(
95
+ name="weight",
96
+ shape=(1, 1, 1, self.dim),
97
+ initializer=keras.initializers.Zeros(),
98
+ )
99
+ self.bias = self.add_weight(
100
+ name="bias",
101
+ shape=(1, 1, 1, self.dim),
102
+ initializer=keras.initializers.Zeros(),
103
+ )
104
+ return super().build(input_shape)
105
+
106
+ def call(self, hidden_states: tf.Tensor):
107
+ global_features = tf.norm(hidden_states, ord="euclidean", axis=(1, 2), keepdims=True)
108
+ norm_features = global_features / (tf.reduce_mean(global_features, axis=-1, keepdims=True) + 1e-6)
109
+ hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
110
+ return hidden_states
111
+
112
+
113
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextEmbeddings with ConvNext->ConvNextV2
114
+ class TFConvNextV2Embeddings(keras.layers.Layer):
115
+ """This class is comparable to (and inspired by) the SwinEmbeddings class
116
+ found in src/transformers/models/swin/modeling_swin.py.
117
+ """
118
+
119
+ def __init__(self, config: ConvNextV2Config, **kwargs):
120
+ super().__init__(**kwargs)
121
+ self.patch_embeddings = keras.layers.Conv2D(
122
+ filters=config.hidden_sizes[0],
123
+ kernel_size=config.patch_size,
124
+ strides=config.patch_size,
125
+ name="patch_embeddings",
126
+ kernel_initializer=get_initializer(config.initializer_range),
127
+ bias_initializer=keras.initializers.Zeros(),
128
+ )
129
+ self.layernorm = keras.layers.LayerNormalization(epsilon=1e-6, name="layernorm")
130
+ self.num_channels = config.num_channels
131
+ self.config = config
132
+
133
+ def call(self, pixel_values):
134
+ if isinstance(pixel_values, dict):
135
+ pixel_values = pixel_values["pixel_values"]
136
+
137
+ tf.debugging.assert_equal(
138
+ shape_list(pixel_values)[1],
139
+ self.num_channels,
140
+ message="Make sure that the channel dimension of the pixel values matches the one set in the configuration.",
141
+ )
142
+
143
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
144
+ # So change the input format from `NCHW` to `NHWC`.
145
+ # shape = (batch_size, in_height, in_width, in_channels)
146
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
147
+
148
+ embeddings = self.patch_embeddings(pixel_values)
149
+ embeddings = self.layernorm(embeddings)
150
+ return embeddings
151
+
152
+ def build(self, input_shape=None):
153
+ if self.built:
154
+ return
155
+ self.built = True
156
+ if getattr(self, "patch_embeddings", None) is not None:
157
+ with tf.name_scope(self.patch_embeddings.name):
158
+ self.patch_embeddings.build([None, None, None, self.config.num_channels])
159
+ if getattr(self, "layernorm", None) is not None:
160
+ with tf.name_scope(self.layernorm.name):
161
+ self.layernorm.build([None, None, None, self.config.hidden_sizes[0]])
162
+
163
+
164
+ class TFConvNextV2Layer(keras.layers.Layer):
165
+ """This corresponds to the `Block` class in the original implementation.
166
+
167
+ There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), 1x1 Conv, GELU, 1x1 Conv]; all in (N, C,
168
+ H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
169
+
170
+ The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow
171
+ NHWC ordering, we can just apply the operations straight-away without the permutation.
172
+
173
+ Args:
174
+ config (`ConvNextV2Config`):
175
+ Model configuration class.
176
+ dim (`int`):
177
+ Number of input channels.
178
+ drop_path (`float`, *optional*, defaults to 0.0):
179
+ Stochastic depth rate.
180
+ """
181
+
182
+ def __init__(self, config: ConvNextV2Config, dim: int, drop_path: float = 0.0, **kwargs):
183
+ super().__init__(**kwargs)
184
+ self.dim = dim
185
+ self.config = config
186
+ self.dwconv = keras.layers.Conv2D(
187
+ filters=dim,
188
+ kernel_size=7,
189
+ padding="same",
190
+ groups=dim,
191
+ kernel_initializer=get_initializer(config.initializer_range),
192
+ bias_initializer=keras.initializers.Zeros(),
193
+ name="dwconv",
194
+ ) # depthwise conv
195
+ self.layernorm = keras.layers.LayerNormalization(
196
+ epsilon=1e-6,
197
+ name="layernorm",
198
+ )
199
+ self.pwconv1 = keras.layers.Dense(
200
+ units=4 * dim,
201
+ kernel_initializer=get_initializer(config.initializer_range),
202
+ bias_initializer=keras.initializers.Zeros(),
203
+ name="pwconv1",
204
+ ) # pointwise/1x1 convs, implemented with linear layers
205
+ self.act = get_tf_activation(config.hidden_act)
206
+ self.grn = TFConvNextV2GRN(config, 4 * dim, dtype=tf.float32, name="grn")
207
+ self.pwconv2 = keras.layers.Dense(
208
+ units=dim,
209
+ kernel_initializer=get_initializer(config.initializer_range),
210
+ bias_initializer=keras.initializers.Zeros(),
211
+ name="pwconv2",
212
+ )
213
+ # Using `layers.Activation` instead of `tf.identity` to better control `training`
214
+ # behaviour.
215
+ self.drop_path = (
216
+ TFConvNextV2DropPath(drop_path, name="drop_path")
217
+ if drop_path > 0.0
218
+ else keras.layers.Activation("linear", name="drop_path")
219
+ )
220
+
221
+ def call(self, hidden_states, training=False):
222
+ input = hidden_states
223
+ x = self.dwconv(hidden_states)
224
+ x = self.layernorm(x)
225
+ x = self.pwconv1(x)
226
+ x = self.act(x)
227
+ x = self.grn(x)
228
+ x = self.pwconv2(x)
229
+ x = self.drop_path(x, training=training)
230
+ x = input + x
231
+ return x
232
+
233
+ def build(self, input_shape=None):
234
+ if self.built:
235
+ return
236
+ self.built = True
237
+ if getattr(self, "dwconv", None) is not None:
238
+ with tf.name_scope(self.dwconv.name):
239
+ self.dwconv.build([None, None, None, self.dim])
240
+ if getattr(self, "layernorm", None) is not None:
241
+ with tf.name_scope(self.layernorm.name):
242
+ self.layernorm.build([None, None, None, self.dim])
243
+ if getattr(self, "pwconv1", None) is not None:
244
+ with tf.name_scope(self.pwconv1.name):
245
+ self.pwconv1.build([None, None, self.dim])
246
+ if getattr(self, "grn", None) is not None:
247
+ with tf.name_scope(self.grn.name):
248
+ self.grn.build(None)
249
+ if getattr(self, "pwconv2", None) is not None:
250
+ with tf.name_scope(self.pwconv2.name):
251
+ self.pwconv2.build([None, None, 4 * self.dim])
252
+ if getattr(self, "drop_path", None) is not None:
253
+ with tf.name_scope(self.drop_path.name):
254
+ self.drop_path.build(None)
255
+
256
+
257
+ # Copied from transformers.models.convnext.modeling_tf_convnext.TFConvNextStage with ConvNext->ConvNextV2
258
+ class TFConvNextV2Stage(keras.layers.Layer):
259
+ """ConvNextV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
260
+
261
+ Args:
262
+ config (`ConvNextV2Config`):
263
+ Model configuration class.
264
+ in_channels (`int`):
265
+ Number of input channels.
266
+ out_channels (`int`):
267
+ Number of output channels.
268
+ depth (`int`):
269
+ Number of residual blocks.
270
+ drop_path_rates(`List[float]`):
271
+ Stochastic depth rates for each layer.
272
+ """
273
+
274
+ def __init__(
275
+ self,
276
+ config: ConvNextV2Config,
277
+ in_channels: int,
278
+ out_channels: int,
279
+ kernel_size: int = 2,
280
+ stride: int = 2,
281
+ depth: int = 2,
282
+ drop_path_rates: Optional[List[float]] = None,
283
+ **kwargs,
284
+ ):
285
+ super().__init__(**kwargs)
286
+ if in_channels != out_channels or stride > 1:
287
+ self.downsampling_layer = [
288
+ keras.layers.LayerNormalization(
289
+ epsilon=1e-6,
290
+ name="downsampling_layer.0",
291
+ ),
292
+ # Inputs to this layer will follow NHWC format since we
293
+ # transposed the inputs from NCHW to NHWC in the `TFConvNextV2Embeddings`
294
+ # layer. All the outputs throughout the model will be in NHWC
295
+ # from this point on until the output where we again change to
296
+ # NCHW.
297
+ keras.layers.Conv2D(
298
+ filters=out_channels,
299
+ kernel_size=kernel_size,
300
+ strides=stride,
301
+ kernel_initializer=get_initializer(config.initializer_range),
302
+ bias_initializer=keras.initializers.Zeros(),
303
+ name="downsampling_layer.1",
304
+ ),
305
+ ]
306
+ else:
307
+ self.downsampling_layer = [tf.identity]
308
+
309
+ drop_path_rates = drop_path_rates or [0.0] * depth
310
+ self.layers = [
311
+ TFConvNextV2Layer(
312
+ config,
313
+ dim=out_channels,
314
+ drop_path=drop_path_rates[j],
315
+ name=f"layers.{j}",
316
+ )
317
+ for j in range(depth)
318
+ ]
319
+ self.in_channels = in_channels
320
+ self.out_channels = out_channels
321
+ self.stride = stride
322
+
323
+ def call(self, hidden_states):
324
+ for layer in self.downsampling_layer:
325
+ hidden_states = layer(hidden_states)
326
+ for layer in self.layers:
327
+ hidden_states = layer(hidden_states)
328
+ return hidden_states
329
+
330
+ def build(self, input_shape=None):
331
+ if self.built:
332
+ return
333
+ self.built = True
334
+ if getattr(self, "layers", None) is not None:
335
+ for layer in self.layers:
336
+ with tf.name_scope(layer.name):
337
+ layer.build(None)
338
+ if self.in_channels != self.out_channels or self.stride > 1:
339
+ with tf.name_scope(self.downsampling_layer[0].name):
340
+ self.downsampling_layer[0].build([None, None, None, self.in_channels])
341
+ with tf.name_scope(self.downsampling_layer[1].name):
342
+ self.downsampling_layer[1].build([None, None, None, self.in_channels])
343
+
344
+
345
+ class TFConvNextV2Encoder(keras.layers.Layer):
346
+ def __init__(self, config: ConvNextV2Config, **kwargs):
347
+ super().__init__(**kwargs)
348
+ self.stages = []
349
+ drop_path_rates = tf.linspace(0.0, config.drop_path_rate, sum(config.depths))
350
+ drop_path_rates = tf.split(drop_path_rates, config.depths)
351
+ drop_path_rates = [x.numpy().tolist() for x in drop_path_rates]
352
+ prev_chs = config.hidden_sizes[0]
353
+ for i in range(config.num_stages):
354
+ out_chs = config.hidden_sizes[i]
355
+ stage = TFConvNextV2Stage(
356
+ config,
357
+ in_channels=prev_chs,
358
+ out_channels=out_chs,
359
+ stride=2 if i > 0 else 1,
360
+ depth=config.depths[i],
361
+ drop_path_rates=drop_path_rates[i],
362
+ name=f"stages.{i}",
363
+ )
364
+ self.stages.append(stage)
365
+ prev_chs = out_chs
366
+
367
+ def call(
368
+ self,
369
+ hidden_states: tf.Tensor,
370
+ output_hidden_states: Optional[bool] = False,
371
+ return_dict: Optional[bool] = True,
372
+ ) -> Union[Tuple, TFBaseModelOutputWithNoAttention]:
373
+ all_hidden_states = () if output_hidden_states else None
374
+
375
+ for i, layer_module in enumerate(self.stages):
376
+ if output_hidden_states:
377
+ all_hidden_states = all_hidden_states + (hidden_states,)
378
+
379
+ hidden_states = layer_module(hidden_states)
380
+
381
+ if output_hidden_states:
382
+ all_hidden_states = all_hidden_states + (hidden_states,)
383
+
384
+ if not return_dict:
385
+ return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
386
+
387
+ return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
388
+
389
+ def build(self, input_shape=None):
390
+ for stage in self.stages:
391
+ with tf.name_scope(stage.name):
392
+ stage.build(None)
393
+
394
+
395
+ @keras_serializable
396
+ class TFConvNextV2MainLayer(keras.layers.Layer):
397
+ config_class = ConvNextV2Config
398
+
399
+ def __init__(self, config: ConvNextV2Config, **kwargs):
400
+ super().__init__(**kwargs)
401
+
402
+ self.config = config
403
+ self.embeddings = TFConvNextV2Embeddings(config, name="embeddings")
404
+ self.encoder = TFConvNextV2Encoder(config, name="encoder")
405
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
406
+ # We are setting the `data_format` like so because from here on we will revert to the
407
+ # NCHW output format
408
+ self.pooler = keras.layers.GlobalAvgPool2D(data_format="channels_last")
409
+
410
+ @unpack_inputs
411
+ def call(
412
+ self,
413
+ pixel_values: TFModelInputType | None = None,
414
+ output_hidden_states: Optional[bool] = None,
415
+ return_dict: Optional[bool] = None,
416
+ training: bool = False,
417
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
418
+ output_hidden_states = (
419
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
420
+ )
421
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
422
+
423
+ if pixel_values is None:
424
+ raise ValueError("You have to specify pixel_values")
425
+
426
+ embedding_output = self.embeddings(pixel_values, training=training)
427
+
428
+ encoder_outputs = self.encoder(
429
+ embedding_output,
430
+ output_hidden_states=output_hidden_states,
431
+ return_dict=return_dict,
432
+ training=training,
433
+ )
434
+
435
+ last_hidden_state = encoder_outputs[0]
436
+
437
+ # Change to NCHW output format to have uniformity in the modules
438
+ pooled_output = self.pooler(last_hidden_state)
439
+ last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
440
+ pooled_output = self.layernorm(pooled_output)
441
+
442
+ # Change the other hidden state outputs to NCHW as well
443
+ if output_hidden_states:
444
+ hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
445
+
446
+ if not return_dict:
447
+ hidden_states = hidden_states if output_hidden_states else ()
448
+ return (last_hidden_state, pooled_output) + hidden_states
449
+
450
+ return TFBaseModelOutputWithPoolingAndNoAttention(
451
+ last_hidden_state=last_hidden_state,
452
+ pooler_output=pooled_output,
453
+ hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
454
+ )
455
+
456
+ def build(self, input_shape=None):
457
+ if self.built:
458
+ return
459
+ self.built = True
460
+ if getattr(self, "embeddings", None) is not None:
461
+ with tf.name_scope(self.embeddings.name):
462
+ self.embeddings.build(None)
463
+ if getattr(self, "encoder", None) is not None:
464
+ with tf.name_scope(self.encoder.name):
465
+ self.encoder.build(None)
466
+ if getattr(self, "layernorm", None) is not None:
467
+ with tf.name_scope(self.layernorm.name):
468
+ self.layernorm.build([None, self.config.hidden_sizes[-1]])
469
+
470
+
471
+ class TFConvNextV2PreTrainedModel(TFPreTrainedModel):
472
+ """
473
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
474
+ models.
475
+ """
476
+
477
+ config_class = ConvNextV2Config
478
+ base_model_prefix = "convnextv2"
479
+ main_input_name = "pixel_values"
480
+
481
+
482
+ CONVNEXTV2_START_DOCSTRING = r"""
483
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
484
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
485
+ etc.)
486
+
487
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
488
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
489
+ behavior.
490
+
491
+ <Tip>
492
+
493
+ TensorFlow models and layers in `transformers` accept two formats as input:
494
+
495
+ - having all inputs as keyword arguments (like PyTorch models), or
496
+ - having all inputs as a list, tuple or dict in the first positional argument.
497
+
498
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
499
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
500
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
501
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
502
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
503
+ positional argument:
504
+
505
+ - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
506
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
507
+ `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
508
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
509
+ `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
510
+
511
+ Note that when creating models and layers with
512
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
513
+ about any of this, as you can just pass inputs like you would to any other Python function!
514
+
515
+ </Tip>
516
+
517
+ Parameters:
518
+ config ([`ConvNextV2Config`]): Model configuration class with all the parameters of the model.
519
+ Initializing with a config file does not load the weights associated with the model, only the
520
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
521
+ """
522
+
523
+ CONVNEXTV2_INPUTS_DOCSTRING = r"""
524
+ Args:
525
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
526
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
527
+ [`ConvNextImageProcessor.__call__`] for details.
528
+
529
+ output_hidden_states (`bool`, *optional*):
530
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
531
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
532
+ used instead.
533
+ return_dict (`bool`, *optional*):
534
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
535
+ eager mode, in graph mode the value will always be set to `True`.
536
+ """
537
+
538
+
539
+ @add_start_docstrings(
540
+ "The bare ConvNextV2 model outputting raw features without any specific head on top.",
541
+ CONVNEXTV2_START_DOCSTRING,
542
+ )
543
+ class TFConvNextV2Model(TFConvNextV2PreTrainedModel):
544
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
545
+ super().__init__(config, *inputs, **kwargs)
546
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
547
+
548
+ @unpack_inputs
549
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
550
+ @add_code_sample_docstrings(
551
+ checkpoint=_CHECKPOINT_FOR_DOC,
552
+ output_type=TFBaseModelOutputWithPoolingAndNoAttention,
553
+ config_class=_CONFIG_FOR_DOC,
554
+ modality="vision",
555
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
556
+ )
557
+ def call(
558
+ self,
559
+ pixel_values: TFModelInputType | None = None,
560
+ output_hidden_states: Optional[bool] = None,
561
+ return_dict: Optional[bool] = None,
562
+ training: bool = False,
563
+ ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
564
+ output_hidden_states = (
565
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
566
+ )
567
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
568
+
569
+ if pixel_values is None:
570
+ raise ValueError("You have to specify pixel_values")
571
+
572
+ outputs = self.convnextv2(
573
+ pixel_values=pixel_values,
574
+ output_hidden_states=output_hidden_states,
575
+ return_dict=return_dict,
576
+ training=training,
577
+ )
578
+
579
+ if not return_dict:
580
+ return outputs[:]
581
+
582
+ return TFBaseModelOutputWithPoolingAndNoAttention(
583
+ last_hidden_state=outputs.last_hidden_state,
584
+ pooler_output=outputs.pooler_output,
585
+ hidden_states=outputs.hidden_states,
586
+ )
587
+
588
+ def build(self, input_shape=None):
589
+ if self.built:
590
+ return
591
+ self.built = True
592
+ if getattr(self, "convnextv2", None) is not None:
593
+ with tf.name_scope(self.convnextv2.name):
594
+ self.convnextv2.build(None)
595
+
596
+
597
+ @add_start_docstrings(
598
+ """
599
+ ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
600
+ ImageNet.
601
+ """,
602
+ CONVNEXTV2_START_DOCSTRING,
603
+ )
604
+ class TFConvNextV2ForImageClassification(TFConvNextV2PreTrainedModel, TFSequenceClassificationLoss):
605
+ def __init__(self, config: ConvNextV2Config, *inputs, **kwargs):
606
+ super().__init__(config, *inputs, **kwargs)
607
+
608
+ self.num_labels = config.num_labels
609
+ self.convnextv2 = TFConvNextV2MainLayer(config, name="convnextv2")
610
+
611
+ # Classifier head
612
+ self.classifier = keras.layers.Dense(
613
+ units=config.num_labels,
614
+ kernel_initializer=get_initializer(config.initializer_range),
615
+ bias_initializer=keras.initializers.Zeros(),
616
+ name="classifier",
617
+ )
618
+
619
+ @unpack_inputs
620
+ @add_start_docstrings_to_model_forward(CONVNEXTV2_INPUTS_DOCSTRING)
621
+ @add_code_sample_docstrings(
622
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
623
+ output_type=TFImageClassifierOutputWithNoAttention,
624
+ config_class=_CONFIG_FOR_DOC,
625
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
626
+ )
627
+ def call(
628
+ self,
629
+ pixel_values: TFModelInputType | None = None,
630
+ output_hidden_states: Optional[bool] = None,
631
+ return_dict: Optional[bool] = None,
632
+ labels: np.ndarray | tf.Tensor | None = None,
633
+ training: Optional[bool] = False,
634
+ ) -> Union[TFImageClassifierOutputWithNoAttention, Tuple[tf.Tensor]]:
635
+ r"""
636
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
637
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
638
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
639
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
640
+ """
641
+ output_hidden_states = (
642
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
643
+ )
644
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
645
+
646
+ if pixel_values is None:
647
+ raise ValueError("You have to specify pixel_values")
648
+
649
+ outputs = self.convnextv2(
650
+ pixel_values,
651
+ output_hidden_states=output_hidden_states,
652
+ return_dict=return_dict,
653
+ training=training,
654
+ )
655
+
656
+ pooled_output = outputs.pooler_output if return_dict else outputs[1]
657
+
658
+ logits = self.classifier(pooled_output)
659
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
660
+
661
+ if not return_dict:
662
+ output = (logits,) + outputs[2:]
663
+ return ((loss,) + output) if loss is not None else output
664
+
665
+ return TFImageClassifierOutputWithNoAttention(
666
+ loss=loss,
667
+ logits=logits,
668
+ hidden_states=outputs.hidden_states,
669
+ )
670
+
671
+ def build(self, input_shape=None):
672
+ if self.built:
673
+ return
674
+ self.built = True
675
+ if getattr(self, "convnextv2", None) is not None:
676
+ with tf.name_scope(self.convnextv2.name):
677
+ self.convnextv2.build(None)
678
+ if getattr(self, "classifier", None) is not None:
679
+ with tf.name_scope(self.classifier.name):
680
+ self.classifier.build([None, None, self.config.hidden_sizes[-1]])
681
+
682
+
683
+ __all__ = ["TFConvNextV2ForImageClassification", "TFConvNextV2Model", "TFConvNextV2PreTrainedModel"]
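The TF classes mirror the PyTorch API; a minimal sketch under the same assumptions as the PyTorch example above (the checkpoint may only ship PyTorch weights, in which case `from_pt=True` is needed):

```python
import requests
import tensorflow as tf
from PIL import Image

from transformers import AutoImageProcessor, TFConvNextV2ForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = TFConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

inputs = processor(image, return_tensors="tf")
logits = model(**inputs).logits

print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])
```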
janus/lib/python3.10/site-packages/transformers/models/emu3/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_emu3 import *
22
+ from .image_processing_emu3 import *
23
+ from .modeling_emu3 import *
24
+ from .processing_emu3 import *
25
+ else:
26
+ import sys
27
+
28
+ _file = globals()["__file__"]
29
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
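A short sketch of how this lazy `__init__` behaves at runtime (assumes a `transformers` build that ships the Emu3 model; the class name is taken from `configuration_emu3.py` below):

```python
import sys

import transformers.models.emu3 as emu3

# The registered module object is a lazy proxy, not a regular module.
print(type(sys.modules["transformers.models.emu3"]).__name__)  # typically "_LazyModule"

# The heavy submodules are only imported on first attribute access.
config_cls = emu3.Emu3VQVAEConfig
print(config_cls.__module__)  # transformers.models.emu3.configuration_emu3
```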
janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/configuration_emu3.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/image_processing_emu3.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/modular_emu3.cpython-310.pyc ADDED
Binary file (45.4 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/emu3/__pycache__/processing_emu3.cpython-310.pyc ADDED
Binary file (8.56 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/emu3/configuration_emu3.py ADDED
@@ -0,0 +1,327 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...modeling_rope_utils import rope_config_validation
21
+
22
+
23
+ class Emu3VQVAEConfig(PretrainedConfig):
24
+ r"""
25
+ This is the configuration class to store the configuration of a [`Emu3VQVAE`]. It is used to instantiate an VQ-VAE
26
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
27
+ defaults will yield a configuration to the VQ model presented in Emu3 paper.
28
+
29
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
30
+ documentation from [`PretrainedConfig`] for more information.
31
+ Args:
32
+ codebook_size (`int`, *optional*, defaults to 32768):
33
+ Codebook size of the VQ model.
34
+ embed_dim (`int`, *optional*, defaults to 4):
35
+ Dimension of the quantized vector in codebook.
36
+ latent_channels (`int`, *optional*, defaults to 4):
37
+ Dimension of the output channel of the encoder and the input channel of the decoder.
38
+ double_latent (`bool`, *optional*, defaults to `False`):
39
+ Whether to double the output dim of the encoder.
40
+ in_channels (`int`, *optional*, defaults to 3):
41
+ Input channel of encoder.
42
+ out_channels (`int`, *optional*, defaults to 3):
43
+ Output channel of decoder.
44
+ temporal_downsample_factor (`int`, *optional*, defaults to 4):
45
+ Temporal downsample factor.
46
+ base_channels (`int`, *optional*, defaults to 256):
47
+ Basic channel number of the intermediate blocks.
48
+ channel_multiplier (`List[int]`, *optional*, defaults to `[1, 2, 2, 4]`):
49
+ Channel scaling factor of the intermediate blocks.
50
+ num_res_blocks (`int`, *optional*, defaults to 2):
51
+ Residual block number in each stage.
52
+ attn_resolutions (`List[int]`, *optional*, defaults to `[3]`):
53
+ Stage indices to apply attention.
54
+ hidden_size (`int`, *optional*, defaults to 1024):
55
+ Dimension of the hidden representations in the attention layer.
56
+ num_attention_heads (`int`, *optional*, defaults to 1):
57
+ Number of attention heads for each attention layer.
58
+ attention_dropout (`float`, *optional*, defaults to 0.0):
59
+ The dropout ratio for the attention probabilities.
60
+
61
+ ```python
62
+ >>> from transformers import Emu3VQVAE, Emu3VQVAEConfig
63
+
64
+ >>> # Initializing a video VQ model of Emu3 configuration
65
+ >>> configuration = Emu3VQVAEConfig()
66
+
67
+ >>> # Initializing a model from the Emu3 VQ model style configuration
68
+ >>> model = Emu3VQVAE(configuration)
69
+
70
+ >>> # Accessing the model configuration
71
+ >>> configuration = model.config
72
+ ```"""
73
+
74
+ model_type = "emu3_vqgan"
75
+ base_config_key = "vq_config"
76
+
77
+ def __init__(
78
+ self,
79
+ codebook_size: int = 32768,
80
+ embed_dim: int = 4,
81
+ latent_channels: int = 4,
82
+ double_latent: bool = False,
83
+ in_channels: int = 3,
84
+ out_channels: int = 3,
85
+ temporal_downsample_factor: int = 4,
86
+ base_channels: int = 256,
87
+ channel_multiplier: List[int] = [1, 2, 2, 4],
88
+ num_res_blocks: int = 2,
89
+ attn_resolutions: List[int] = [3],
90
+ hidden_size: int = 1024,
91
+ num_attention_heads: int = 1,
92
+ attention_dropout: float = 0.0,
93
+ **kwargs,
94
+ ):
95
+ super().__init__(**kwargs)
96
+
97
+ self.codebook_size = codebook_size
98
+ self.embed_dim = embed_dim
99
+ self.latent_channels = latent_channels
100
+ self.double_latent = double_latent
101
+ self.in_channels = in_channels
102
+ self.out_channels = out_channels
103
+ self.temporal_downsample_factor = temporal_downsample_factor
104
+ self.base_channels = base_channels
105
+ self.channel_multiplier = channel_multiplier
106
+ self.num_res_blocks = num_res_blocks
107
+ self.attn_resolutions = attn_resolutions
108
+ self.hidden_size = hidden_size
109
+ self.num_attention_heads = num_attention_heads
110
+ self.attention_dropout = attention_dropout
111
+
112
+
113
+ class Emu3TextConfig(PretrainedConfig):
114
+ r"""
115
+ This is the configuration class to store the configuration of an [`Emu3TextModel`]. It is used to instantiate an
116
+ Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
117
+ configuration with the defaults will yield a similar configuration to that of the
118
+ [Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
119
+
120
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
121
+ documentation from [`PretrainedConfig`] for more information.
122
+
123
+
124
+ Args:
125
+ vocab_size (`int`, *optional*, defaults to 184622):
126
+ Vocabulary size of the Emu3 model. Defines the number of different tokens that can be represented by the
127
+ `inputs_ids` passed when calling [`Emu3Model`]
128
+ hidden_size (`int`, *optional*, defaults to 4096):
129
+ Dimension of the hidden representations.
130
+ intermediate_size (`int`, *optional*, defaults to 14336):
131
+ Dimension of the MLP representations.
132
+ num_hidden_layers (`int`, *optional*, defaults to 32):
133
+ Number of hidden layers in the Transformer decoder.
134
+ num_attention_heads (`int`, *optional*, defaults to 32):
135
+ Number of attention heads for each attention layer in the Transformer decoder.
136
+ num_key_value_heads (`int`, *optional*, defaults to 8):
137
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
138
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
139
+ `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
140
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
141
+ by meanpooling all the original heads within that group. For more details checkout [this
142
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
143
+ `num_attention_heads`.
144
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
145
+ The non-linear activation function (function or string) in the decoder.
146
+ max_position_embeddings (`int`, *optional*, defaults to 9216):
147
+ The maximum sequence length that this model might ever be used with. Emu supports up to 9216 tokens.
148
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
149
+ The epsilon used by the rms normalization layers.
150
+ use_cache (`bool`, *optional*, defaults to `True`):
151
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
152
+ relevant if `config.is_decoder=True`.
153
+ pad_token_id (`int`, *optional*, defaults to 151643):
154
+ Padding token id.
155
+ bos_token_id (`int`, *optional*, defaults to 151849):
156
+ Beginning of stream token id.
157
+ eos_token_id (`int`, *optional*, defaults to 151850):
158
+ End of stream token id.
159
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
160
+ Whether to tie weight embeddings
161
+ rope_theta (`float`, *optional*, defaults to 1000000.0):
162
+ The base period of the RoPE embeddings.
163
+ rope_scaling (`Dict`, *optional*):
164
+ Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
165
+ and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
166
+ accordingly.
167
+ Expected contents:
168
+ `rope_type` (`str`):
169
+ The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
170
+ 'llama3'], with 'default' being the original RoPE implementation.
171
+ `factor` (`float`, *optional*):
172
+ Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
173
+ most scaling types, a `factor` of x will enable the model to handle sequences of length x *
174
+ original maximum pre-trained length.
175
+ `original_max_position_embeddings` (`int`, *optional*):
176
+ Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
177
+ pretraining.
178
+ `attention_factor` (`float`, *optional*):
179
+ Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
180
+ computation. If unspecified, it defaults to value recommended by the implementation, using the
181
+ `factor` field to infer the suggested value.
182
+ `beta_fast` (`float`, *optional*):
183
+ Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
184
+ ramp function. If unspecified, it defaults to 32.
185
+ `beta_slow` (`float`, *optional*):
186
+ Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
187
+ ramp function. If unspecified, it defaults to 1.
188
+ `short_factor` (`List[float]`, *optional*):
189
+ Only used with 'longrope'. The scaling factor to be applied to short contexts (<
190
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
191
+ size divided by the number of attention heads divided by 2
192
+ `long_factor` (`List[float]`, *optional*):
193
+ Only used with 'longrope'. The scaling factor to be applied to long contexts (>
194
+ `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
195
+ size divided by the number of attention heads divided by 2
196
+ `low_freq_factor` (`float`, *optional*):
197
+ Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
198
+ `high_freq_factor` (`float`, *optional*):
199
+ Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
200
+ mlp_bias (`bool`, *optional*, defaults to `False`):
201
+ Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
202
+ attention_bias (`bool`, *optional*, defaults to `False`):
203
+ Whether to use a bias in the query, key, value and output projection layers during self-attention.
204
+ attention_dropout (`float`, *optional*, defaults to 0.1):
205
+ The dropout ratio for the attention probabilities.
206
+ initializer_range (`float`, *optional*, defaults to 0.02):
207
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
208
+
209
+
210
+ ```python
211
+ >>> from transformers import Emu3Model, Emu3Config
212
+
213
+ >>> # Initializing a Emu3-community/Emu3-Chat-hf style configuration
214
+ >>> configuration = Emu3Config()
215
+
216
+ >>> # Initializing a model from the Emu3-community/Emu3-Chat-hf style configuration
217
+ >>> model = Emu3Model(configuration)
218
+
219
+ >>> # Accessing the model configuration
220
+ >>> configuration = model.config
221
+ ```"""
222
+
223
+ model_type = "emu3_text_model"
224
+ base_config_key = "text_config"
225
+ keys_to_ignore_at_inference = ["past_key_values"]
226
+
227
+ def __init__(
228
+ self,
229
+ vocab_size: int = 184622,
230
+ hidden_size: int = 4096,
231
+ intermediate_size: int = 14336,
232
+ num_hidden_layers: int = 32,
233
+ num_attention_heads: int = 32,
234
+ num_key_value_heads: Optional[int] = 8,
235
+ hidden_act: str = "silu",
236
+ max_position_embeddings: int = 9216,
237
+ rms_norm_eps: float = 1e-5,
238
+ use_cache: bool = True,
239
+ pad_token_id: int = 151643,
240
+ bos_token_id: int = 151849,
241
+ eos_token_id: int = 151850,
242
+ tie_word_embeddings: bool = False,
243
+ rope_theta: float = 1000000.0,
244
+ rope_scaling: Optional[Dict] = None,
245
+ mlp_bias=False,
246
+ attention_bias=False,
247
+ attention_dropout: float = 0.1,
248
+ initializer_range: float = 0.02,
249
+ **kwargs,
250
+ ):
251
+ self.vocab_size = vocab_size
252
+ self.max_position_embeddings = max_position_embeddings
253
+ self.hidden_size = hidden_size
254
+ self.intermediate_size = intermediate_size
255
+ self.num_hidden_layers = num_hidden_layers
256
+ self.num_attention_heads = num_attention_heads
257
+ self.num_key_value_heads = num_key_value_heads
258
+ self.hidden_act = hidden_act
259
+ self.rms_norm_eps = rms_norm_eps
260
+ self.use_cache = use_cache
261
+ self.rope_theta = rope_theta
262
+ self.rope_scaling = rope_scaling
263
+ self.mlp_bias = mlp_bias
264
+ self.attention_bias = attention_bias
265
+ self.initializer_range = initializer_range
266
+ rope_config_validation(self)
267
+
268
+ self.attention_dropout = attention_dropout
269
+
270
+ super().__init__(
271
+ pad_token_id=pad_token_id,
272
+ bos_token_id=bos_token_id,
273
+ eos_token_id=eos_token_id,
274
+ tie_word_embeddings=tie_word_embeddings,
275
+ **kwargs,
276
+ )
277
+
278
+
279
+ class Emu3Config(PretrainedConfig):
280
+ """
281
+ This is the configuration class to store the configuration of a [`Emu3Model`]. It is used to instantiate an
282
+ Emu3 model according to the specified arguments, defining the model architecture. Instantiating a
283
+ configuration with the defaults will yield a similar configuration to that of the
284
+ [Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).
285
+
286
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
287
+ documentation from [`PretrainedConfig`] for more information.
288
+
289
+
290
+ Args:
291
+ vq_config (`Union[Dict, Emu3VQVAEConfig]`, *optional*):
292
+ Emu3VQVAEConfig instance containing the configuration for the VQ-VAE model.
293
+ text_config (`Union[Dict, Emu3TextConfig]`, *optional*):
294
+ Emu3TextConfig instance containing the configuration for the language model.
295
+ vocabulary_map (`dict`, *optional*):
296
+ A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs.
297
+ """
298
+
299
+ model_type = "emu3"
300
+ keys_to_ignore_at_inference = ["past_key_values"]
301
+ sub_configs = {"text_config": Emu3TextConfig, "vq_config": Emu3VQVAEConfig}
302
+
303
+ def __init__(
304
+ self,
305
+ vq_config: Union[Dict, Emu3VQVAEConfig] = None,
306
+ text_config: Union[Dict, Emu3TextConfig] = None,
307
+ vocabulary_map: Dict[int, int] = None,
308
+ **kwargs,
309
+ ):
310
+ if vq_config is None:
311
+ vq_config = Emu3VQVAEConfig()
312
+ elif isinstance(vq_config, dict):
313
+ vq_config = Emu3VQVAEConfig(**vq_config)
314
+
315
+ if text_config is None:
316
+ text_config = Emu3TextConfig()
317
+ elif isinstance(text_config, dict):
318
+ text_config = Emu3TextConfig(**text_config)
319
+
320
+ self.vq_config = vq_config
321
+ self.text_config = text_config
322
+ self.vocabulary_map = vocabulary_map
323
+
324
+ super().__init__(**kwargs)
325
+
326
+
327
+ __all__ = ["Emu3Config", "Emu3TextConfig", "Emu3VQVAEConfig"]
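The three configuration classes above compose: `Emu3Config` accepts its sub-configurations either as instances or as plain dicts. A minimal sketch of that composition follows; the tiny sizes and the `rope_scaling` dict are illustrative values rather than the released checkpoint's settings, and the top-level imports assume the `__all__` exports above are surfaced by the installed `transformers` package.

```python
from transformers import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig

# Illustrative, down-scaled sub-configs (not the real Emu3-Chat-hf values).
text_config = Emu3TextConfig(
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=2,
    num_attention_heads=8,
    num_key_value_heads=2,                                # GQA: 8 query heads share 2 KV heads
    rope_scaling={"rope_type": "linear", "factor": 2.0},  # checked by rope_config_validation
)
vq_config = Emu3VQVAEConfig(base_channels=64, channel_multiplier=[1, 2])

config = Emu3Config(text_config=text_config, vq_config=vq_config)
print(config.text_config.num_key_value_heads)  # 2
print(config.vq_config.codebook_size)          # 32768 (default)
```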
janus/lib/python3.10/site-packages/transformers/models/emu3/image_processing_emu3.py ADDED
@@ -0,0 +1,552 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import math
18
+ from typing import Dict, Iterable, List, Optional, Union
19
+
20
+ import numpy as np
21
+
22
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature
23
+ from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
24
+ from ...image_utils import (
25
+ OPENAI_CLIP_MEAN,
26
+ OPENAI_CLIP_STD,
27
+ ChannelDimension,
28
+ ImageInput,
29
+ PILImageResampling,
30
+ VideoInput,
31
+ get_image_size,
32
+ infer_channel_dimension_format,
33
+ is_scaled_image,
34
+ is_valid_image,
35
+ make_list_of_images,
36
+ to_numpy_array,
37
+ valid_images,
38
+ validate_preprocess_arguments,
39
+ )
40
+ from ...utils import TensorType, is_vision_available, logging
41
+
42
+
43
+ if is_vision_available():
44
+ from PIL import Image
45
+
46
+ logger = logging.get_logger(__name__)
47
+
48
+
49
+ def make_batched_images(images) -> List[List[ImageInput]]:
50
+ """
51
+ Accepts images in list or nested list format, and makes a list of images for preprocessing.
52
+
53
+ Args:
54
+ images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):
55
+ The input image.
56
+
57
+ Returns:
58
+ list: A list of images.
59
+ """
60
+ if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):
61
+ return [img for img_list in images for img in img_list]
62
+
63
+ elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):
64
+ return images
65
+
66
+ elif is_valid_image(images):
67
+ return [images]
68
+
69
+ raise ValueError(f"Could not make batched images from {images}")
70
+
71
+
72
+ def smart_resize(
73
+ height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
74
+ ):
75
+ """Rescales the image so that the following conditions are met:
76
+
77
+ 1. Both dimensions (height and width) are divisible by 'factor'.
78
+
79
+ 2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].
80
+
81
+ 3. The aspect ratio of the image is maintained as closely as possible.
82
+
83
+ """
84
+ if height < factor or width < factor:
85
+ raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}")
86
+ elif max(height, width) / min(height, width) > 200:
87
+ raise ValueError(
88
+ f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
89
+ )
90
+ h_bar = round(height / factor) * factor
91
+ w_bar = round(width / factor) * factor
92
+ if h_bar * w_bar > max_pixels:
93
+ beta = math.sqrt((height * width) / max_pixels)
94
+ h_bar = math.floor(height / beta / factor) * factor
95
+ w_bar = math.floor(width / beta / factor) * factor
96
+ elif h_bar * w_bar < min_pixels:
97
+ beta = math.sqrt(min_pixels / (height * width))
98
+ h_bar = math.ceil(height * beta / factor) * factor
99
+ w_bar = math.ceil(width * beta / factor) * factor
100
+ return h_bar, w_bar
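As a quick check of the three conditions listed in the docstring above, the sketch below calls `smart_resize` with the image processor's default `spatial_factor` of 8 and its default pixel bounds; the input size is an arbitrary example.

```python
# Example only: a 513x1025 input with factor=8 and the default pixel bounds.
h, w = smart_resize(513, 1025, factor=8, min_pixels=512 * 512, max_pixels=1024 * 1024)
print(h, w)   # 512 1024 -> both divisible by 8, aspect ratio nearly preserved
print(h * w)  # 524288   -> inside [262144, 1048576]
```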
101
+
102
+
103
+ class Emu3ImageProcessor(BaseImageProcessor):
104
+ r"""
105
+ Constructs an Emu3 image processor that dynamically resizes images based on the original image dimensions.
106
+
107
+ Args:
108
+ do_resize (`bool`, *optional*, defaults to `True`):
109
+ Whether to resize the image's (height, width) dimensions.
110
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
111
+ Resampling filter to use when resizing the image.
112
+ do_rescale (`bool`, *optional*, defaults to `True`):
113
+ Whether to rescale the image by the specified scale `rescale_factor`.
114
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
115
+ Scale factor to use if rescaling the image.
116
+ do_normalize (`bool`, *optional*, defaults to `True`):
117
+ Whether to normalize the image.
118
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
119
+ Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
120
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
121
+ Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
122
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
123
+ Whether to convert the image to RGB.
124
+ do_pad (`bool`, *optional*, defaults to `True`):
125
+ Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
126
+ number of patches in the batch. Padding will be applied to the bottom and right with zeros.
127
+ min_pixels (`int`, *optional*, defaults to `512 * 512`):
128
+ The min pixels of the image to resize the image.
129
+ max_pixels (`int`, *optional*, defaults to `1024 * 1024`):
130
+ The max pixels of the image to resize the image.
131
+ spatial_factor (`int`, *optional*, defaults to 8):
132
+ The spatial downsampling factor applied to the image during the feature extraction phase.
133
+ """
134
+
135
+ model_input_names = ["pixel_values"]
136
+
137
+ def __init__(
138
+ self,
139
+ do_resize: bool = True,
140
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
141
+ do_rescale: bool = True,
142
+ rescale_factor: Union[int, float] = 1 / 255,
143
+ do_normalize: bool = True,
144
+ image_mean: Optional[Union[float, List[float]]] = None,
145
+ image_std: Optional[Union[float, List[float]]] = None,
146
+ do_convert_rgb: bool = True,
147
+ do_pad: bool = True,
148
+ min_pixels: int = 512 * 512,
149
+ max_pixels: int = 1024 * 1024,
150
+ spatial_factor: int = 8,
151
+ **kwargs,
152
+ ) -> None:
153
+ super().__init__(**kwargs)
154
+ self.do_resize = do_resize
155
+ self.resample = resample
156
+ self.do_rescale = do_rescale
157
+ self.rescale_factor = rescale_factor
158
+ self.do_normalize = do_normalize
159
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
160
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
161
+ self.min_pixels = min_pixels
162
+ self.max_pixels = max_pixels
163
+ self.spatial_factor = spatial_factor
164
+ self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
165
+ self.do_convert_rgb = do_convert_rgb
+ self.do_pad = do_pad
166
+
167
+ def _preprocess(
168
+ self,
169
+ images: Union[ImageInput, VideoInput],
170
+ do_resize: bool = None,
171
+ resample: PILImageResampling = None,
172
+ do_rescale: bool = None,
173
+ rescale_factor: float = None,
174
+ do_normalize: bool = None,
175
+ image_mean: Optional[Union[float, List[float]]] = None,
176
+ image_std: Optional[Union[float, List[float]]] = None,
177
+ do_convert_rgb: bool = None,
178
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
179
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
180
+ ):
181
+ """
182
+ Preprocess an image or batch of images.
183
+
184
+ Args:
185
+ images (`ImageInput`):
186
+ Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
187
+ vision_info (`List[Dict]`, *optional*):
188
+ Optional list of dictionaries containing additional information about vision inputs.
189
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
190
+ Whether to resize the image.
191
+ resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
192
+ Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
193
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
194
+ Whether to rescale the image.
195
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
196
+ Scale factor to use if rescaling the image.
197
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
198
+ Whether to normalize the image.
199
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
200
+ Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
201
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
202
+ Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
203
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
204
+ Whether to convert the image to RGB.
205
+ data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
206
+ The channel dimension format for the output image. Can be one of:
207
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
208
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
209
+ - Unset: Use the channel dimension format of the input image.
210
+ input_data_format (`ChannelDimension` or `str`, *optional*):
211
+ The channel dimension format for the input image. Can be one of:
212
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
213
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
214
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
215
+ """
216
+ images = make_list_of_images(images)
217
+
218
+ if do_convert_rgb:
219
+ images = [convert_to_rgb(image) for image in images]
220
+
221
+ # All transformations expect numpy arrays.
222
+ images = [to_numpy_array(image) for image in images]
223
+
224
+ if is_scaled_image(images[0]) and do_rescale:
225
+ logger.warning_once(
226
+ "It looks like you are trying to rescale already rescaled images. If the input"
227
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
228
+ )
229
+ if input_data_format is None:
230
+ # We assume that all images have the same channel dimension format.
231
+ input_data_format = infer_channel_dimension_format(images[0])
232
+
233
+ height, width = get_image_size(images[0], channel_dim=input_data_format)
234
+ resized_height, resized_width = height, width
235
+ processed_images = []
236
+ for image in images:
237
+ if do_resize:
238
+ resized_height, resized_width = smart_resize(
239
+ height,
240
+ width,
241
+ factor=self.spatial_factor,
242
+ min_pixels=self.min_pixels,
243
+ max_pixels=self.max_pixels,
244
+ )
245
+ image = resize(
246
+ image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
247
+ )
248
+
249
+ if do_rescale:
250
+ image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
251
+
252
+ if do_normalize:
253
+ image = self.normalize(
254
+ image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
255
+ )
256
+
257
+ image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
258
+ processed_images.append(image)
259
+
260
+ images = np.array(processed_images)
261
+ return images
262
+
263
+ def _pad_for_batching(
264
+ self,
265
+ pixel_values: List[np.ndarray],
266
+ image_sizes: List[List[int]],
267
+ data_format: Optional[Union[str, ChannelDimension]] = None,
268
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
269
+ ):
270
+ """
271
+ Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
272
+
273
+ Args:
274
+ pixel_values (`List[np.ndarray]`):
275
+ An array of pixel values for each image, of shape (`batch_size`, `num_patches`, `image_in_3D`)
276
+ image_sizes (`List[List[int]]`):
277
+ A list of sizes for each image in `pixel_values` in (height, width) format.
278
+ data_format (`str` or `ChannelDimension`, *optional*):
279
+ The channel dimension format for the output image. Can be one of:
280
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
281
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
282
+ If unset, will use same as the input image.
283
+ input_data_format (`str` or `ChannelDimension`, *optional*):
284
+ The channel dimension format for the input image. Can be one of:
285
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
286
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
287
+ If unset, will use the inferred format of the input image.
288
+
289
+ Returns:
290
+ List[`np.ndarray`]: The padded images.
291
+ """
292
+
293
+ max_shape = (
294
+ max([size[0] for size in image_sizes]),
295
+ max([size[1] for size in image_sizes]),
296
+ )
297
+ pixel_values = [
298
+ pad(
299
+ image,
300
+ padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])),
301
+ data_format=data_format,
302
+ input_data_format=input_data_format,
303
+ )
304
+ for image, size in zip(pixel_values, image_sizes)
305
+ ]
306
+ return pixel_values
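The method above pads each channels-first image on the bottom and right with zeros so that every image in the batch reaches the largest (height, width) present. A hedged NumPy sketch of the same idea, using `np.pad` in place of the library's `pad` helper and illustrative shapes:

```python
import numpy as np

images = [np.ones((3, 10, 12)), np.ones((3, 8, 16))]          # channels-first toy images
sizes = [img.shape[-2:] for img in images]                    # [(10, 12), (8, 16)]
max_h, max_w = max(s[0] for s in sizes), max(s[1] for s in sizes)

padded = [
    np.pad(img, ((0, 0), (0, max_h - h), (0, max_w - w)))     # zeros on bottom/right only
    for img, (h, w) in zip(images, sizes)
]
print(np.array(padded).shape)                                 # (2, 3, 10, 16)
```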
307
+
308
+ def preprocess(
309
+ self,
310
+ images: ImageInput,
311
+ do_resize: bool = None,
312
+ size: Dict[str, int] = None,
313
+ resample: PILImageResampling = None,
314
+ do_rescale: bool = None,
315
+ rescale_factor: float = None,
316
+ do_normalize: bool = None,
317
+ image_mean: Optional[Union[float, List[float]]] = None,
318
+ image_std: Optional[Union[float, List[float]]] = None,
319
+ do_convert_rgb: bool = None,
320
+ do_pad: bool = True,
321
+ return_tensors: Optional[Union[str, TensorType]] = None,
322
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
323
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
324
+ ):
325
+ """
326
+ Args:
327
+ images (`ImageInput`):
328
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
329
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
330
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
331
+ Whether to resize the image.
332
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
333
+ Size of the image after resizing. For this image processor, a dictionary with `min_pixels` and `max_pixels` keys
334
+ that bounds the total number of pixels while the aspect ratio is kept as close as possible.
335
+ resample (`int`, *optional*, defaults to `self.resample`):
336
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
337
+ has an effect if `do_resize` is set to `True`.
338
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
339
+ Whether to rescale the image.
340
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
341
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
342
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
343
+ Whether to normalize the image.
344
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
345
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
346
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
347
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
348
+ `True`.
349
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
350
+ Whether to convert the image to RGB.
351
+ do_pad (`bool`, *optional*, defaults to `True`):
352
+ Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
353
+ number of patches in the batch. Padding will be applied to the bottom and right with zeros.
354
+ return_tensors (`str` or `TensorType`, *optional*):
355
+ The type of tensors to return. Can be one of:
356
+ - Unset: Return a list of `np.ndarray`.
357
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
358
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
359
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
360
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
361
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
362
+ The channel dimension format for the output image. Can be one of:
363
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
364
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
365
+ - Unset: Use the channel dimension format of the input image.
366
+ input_data_format (`ChannelDimension` or `str`, *optional*):
367
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
368
+ from the input image. Can be one of:
369
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
370
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
371
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
372
+
373
+ """
374
+ do_resize = do_resize if do_resize is not None else self.do_resize
375
+ size = size if size is not None else self.size
376
+ resample = resample if resample is not None else self.resample
377
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
378
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
379
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
380
+ image_mean = image_mean if image_mean is not None else self.image_mean
381
+ image_std = image_std if image_std is not None else self.image_std
382
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
383
+ do_pad = do_pad if do_pad is not None else self.do_pad
384
+
385
+ if images is not None:
386
+ images = make_batched_images(images)
387
+
388
+ if images is not None and not valid_images(images):
389
+ raise ValueError(
390
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
391
+ "torch.Tensor, tf.Tensor or jax.ndarray."
392
+ )
393
+
394
+ validate_preprocess_arguments(
395
+ rescale_factor=rescale_factor,
396
+ do_normalize=do_normalize,
397
+ image_mean=image_mean,
398
+ image_std=image_std,
399
+ do_resize=do_resize,
400
+ size=size,
401
+ resample=resample,
402
+ )
403
+
404
+ pixel_values = []
405
+ for image in images:
406
+ image = self._preprocess(
407
+ image,
408
+ do_resize=do_resize,
409
+ resample=resample,
410
+ do_rescale=do_rescale,
411
+ rescale_factor=rescale_factor,
412
+ do_normalize=do_normalize,
413
+ image_mean=image_mean,
414
+ image_std=image_std,
415
+ data_format=data_format,
416
+ do_convert_rgb=do_convert_rgb,
417
+ input_data_format=input_data_format,
418
+ )
419
+ pixel_values.extend(image)
420
+
421
+ image_sizes = [image.shape[-2:] for image in pixel_values]
422
+ if do_pad:
423
+ pixel_values = self._pad_for_batching(pixel_values, image_sizes)
424
+ pixel_values = np.array(pixel_values)
425
+
426
+ return BatchFeature(
427
+ data={"pixel_values": pixel_values, "image_sizes": image_sizes}, tensor_type=return_tensors
428
+ )
429
+
430
+ def postprocess(
431
+ self,
432
+ images: ImageInput,
433
+ do_rescale: Optional[bool] = None,
434
+ rescale_factor: Optional[float] = None,
435
+ do_normalize: Optional[bool] = None,
436
+ image_mean: Optional[Union[float, List[float]]] = None,
437
+ image_std: Optional[Union[float, List[float]]] = None,
438
+ return_tensors: Union[str, TensorType] = "PIL.Image.Image",
439
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
440
+ ):
441
+ """
442
+ Postprocess an image or batch of images tensor. Postprocess is the reverse process of preprocess.
443
+ The parameters should be same as in preprocess.
444
+ Args:
445
+ images (`ImageInput`):
446
+ Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1.
447
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
448
+ Whether to rescale the image.
449
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
450
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
451
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
452
+ Whether to normalize the image.
453
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
454
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
455
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
456
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`.
457
+ return_tensors (`str` or `TensorType`, *optional*):
458
+ The type of tensors to return. Can be one of:
459
+ - Unset: Return a list of `np.ndarray`.
460
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
461
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
462
+ input_data_format (`ChannelDimension` or `str`, *optional*):
463
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
464
+ from the input image. Can be one of:
465
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
466
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
467
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
468
+ """
469
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
470
+ rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
471
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
472
+ image_mean = image_mean if image_mean is not None else self.image_mean
473
+ image_std = image_std if image_std is not None else self.image_std
474
+
475
+ images = make_list_of_images(images)
476
+ if isinstance(images[0], Image.Image):
477
+ return images if len(images) > 1 else images[0]
478
+
479
+ if input_data_format is None:
480
+ # We assume that all images have the same channel dimension format.
481
+ input_data_format = infer_channel_dimension_format(images[0])
482
+
483
+ pixel_values = []
484
+ for image in images:
485
+ image = to_numpy_array(image)
486
+ if do_normalize:
487
+ image = self.unnormalize(
488
+ image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format
489
+ )
490
+
491
+ if do_rescale:
492
+ image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
493
+ image = image.clip(0, 255).astype(np.uint8)
494
+
495
+ if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
496
+ image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format)
497
+ pixel_values.append(Image.fromarray(image))
498
+ else:
499
+ pixel_values.extend(image)
500
+
501
+ data = {"pixel_values": pixel_values}
502
+ return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
503
+
504
+ return BatchFeature(data=data, tensor_type=return_tensors)
505
+
506
+ def unnormalize(
507
+ self,
508
+ image: np.array,
509
+ image_mean: Union[float, Iterable[float]],
510
+ image_std: Union[float, Iterable[float]],
511
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
512
+ ) -> np.array:
513
+ """
514
+ Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`.
515
+ image = (image * image_std) + image_mean
516
+ Args:
517
+ image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`):
518
+ Batch of pixel values to postprocess.
519
+ image_mean (`float` or `Iterable[float]`):
520
+ The mean to use for unnormalization.
521
+ image_std (`float` or `Iterable[float]`):
522
+ The standard deviation to use for unnormalization.
523
+ input_data_format (`ChannelDimension` or `str`, *optional*):
524
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
525
+ from the input image. Can be one of:
526
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
527
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
528
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
529
+ """
530
+ num_channels = 3
531
+
532
+ if isinstance(image_mean, Iterable):
533
+ if len(image_mean) != num_channels:
534
+ raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}")
535
+ else:
536
+ image_mean = [image_mean] * num_channels
537
+
538
+ if isinstance(image_std, Iterable):
539
+ if len(image_std) != num_channels:
540
+ raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}")
541
+ else:
542
+ image_std = [image_std] * num_channels
543
+
544
+ rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std))
545
+ rev_image_std = tuple(1 / std for std in image_std)
546
+ image = self.normalize(
547
+ image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format
548
+ )
549
+ return image
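`unnormalize` inverts `(image - mean) / std` by reusing `self.normalize` with a transformed mean and std. A small NumPy check of that identity, using the OpenAI CLIP statistics from above as example values:

```python
import numpy as np

x = np.random.rand(3, 4, 4)                                   # toy channels-first image in [0, 1]
m = np.array([0.48145466, 0.4578275, 0.40821073])[:, None, None]
s = np.array([0.26862954, 0.26130258, 0.27577711])[:, None, None]

normalized = (x - m) / s                                      # what `normalize` computes
recovered = (normalized - (-m / s)) / (1 / s)                 # `normalize` with rev mean/std, as in `unnormalize`
print(np.allclose(recovered, x))                              # True: equals normalized * s + m
```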
550
+
551
+
552
+ __all__ = ["Emu3ImageProcessor"]
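A minimal end-to-end usage sketch of the processor defined above; the pixel bounds and the random input image are illustrative, and the import assumes the class is exposed by the installed `transformers` package.

```python
import numpy as np
from PIL import Image
from transformers import Emu3ImageProcessor  # assumes the export above is surfaced by the package

processor = Emu3ImageProcessor(min_pixels=256 * 256, max_pixels=512 * 512)  # smaller bounds, for illustration
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))

inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, H, W), with H and W divisible by spatial_factor=8
print(inputs["image_sizes"])         # per-image (height, width) chosen by smart_resize
```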
janus/lib/python3.10/site-packages/transformers/models/emu3/modeling_emu3.py ADDED
@@ -0,0 +1,1949 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/emu3/modular_emu3.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_emu3.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ #
11
+ # Licensed under the Apache License, Version 2.0 (the "License");
12
+ # you may not use this file except in compliance with the License.
13
+ # You may obtain a copy of the License at
14
+ #
15
+ # http://www.apache.org/licenses/LICENSE-2.0
16
+ #
17
+ # Unless required by applicable law or agreed to in writing, software
18
+ # distributed under the License is distributed on an "AS IS" BASIS,
19
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20
+ # See the License for the specific language governing permissions and
21
+ # limitations under the License.
22
+
23
+ import math
24
+ from functools import cached_property
25
+ from typing import Callable, List, Optional, Tuple, Union
26
+
27
+ import torch
28
+ import torch.nn as nn
29
+ import torch.nn.functional as F
30
+
31
+ from ...activations import ACT2FN
32
+ from ...cache_utils import Cache, DynamicCache, StaticCache
33
+ from ...generation import GenerationMixin
34
+ from ...modeling_attn_mask_utils import AttentionMaskConverter
35
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
36
+ from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
37
+ from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
38
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
39
+ from ...processing_utils import Unpack
40
+ from ...utils import (
41
+ LossKwargs,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+
53
+ _CONFIG_FOR_DOC = "Emu3Config"
54
+
55
+
56
+ class Emu3RMSNorm(nn.Module):
57
+ def __init__(self, hidden_size, eps=1e-6):
58
+ """
59
+ Emu3RMSNorm is equivalent to T5LayerNorm
60
+ """
61
+ super().__init__()
62
+ self.weight = nn.Parameter(torch.ones(hidden_size))
63
+ self.variance_epsilon = eps
64
+
65
+ def forward(self, hidden_states):
66
+ input_dtype = hidden_states.dtype
67
+ hidden_states = hidden_states.to(torch.float32)
68
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
69
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
70
+ return self.weight * hidden_states.to(input_dtype)
71
+
72
+ def extra_repr(self):
73
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
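A quick numeric check that the module above matches the RMSNorm formula x / sqrt(mean(x^2) + eps), scaled by the learnable weight (all ones at initialization); the shapes are arbitrary.

```python
import torch

norm = Emu3RMSNorm(hidden_size=8, eps=1e-6)
x = torch.randn(2, 3, 8)

y = norm(x)
ref = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)  # weight is all ones at init
print(torch.allclose(y, ref, atol=1e-6))                       # True
```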
74
+
75
+
76
+ class Emu3MLP(nn.Module):
77
+ def __init__(self, config):
78
+ super().__init__()
79
+ self.config = config
80
+ self.hidden_size = config.hidden_size
81
+ self.intermediate_size = config.intermediate_size
82
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
83
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
84
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
85
+ self.act_fn = ACT2FN[config.hidden_act]
86
+
87
+ def forward(self, x):
88
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
89
+ return down_proj
90
+
91
+
92
+ def rotate_half(x):
93
+ """Rotates half the hidden dims of the input."""
94
+ x1 = x[..., : x.shape[-1] // 2]
95
+ x2 = x[..., x.shape[-1] // 2 :]
96
+ return torch.cat((-x2, x1), dim=-1)
97
+
98
+
99
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
100
+ """Applies Rotary Position Embedding to the query and key tensors.
101
+
102
+ Args:
103
+ q (`torch.Tensor`): The query tensor.
104
+ k (`torch.Tensor`): The key tensor.
105
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
106
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
107
+ position_ids (`torch.Tensor`, *optional*):
108
+ Deprecated and unused.
109
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
110
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
111
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
112
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
113
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
114
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
115
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
116
+ Returns:
117
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
118
+ """
119
+ cos = cos.unsqueeze(unsqueeze_dim)
120
+ sin = sin.unsqueeze(unsqueeze_dim)
121
+ q_embed = (q * cos) + (rotate_half(q) * sin)
122
+ k_embed = (k * cos) + (rotate_half(k) * sin)
123
+ return q_embed, k_embed
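To make the broadcasting described in the docstring concrete, here is a small shape sketch with illustrative dimensions (batch 1, 8 query heads, 2 key/value heads, sequence length 6, head dim 64); the cos/sin construction simply mirrors a standard RoPE table and is only meant to produce tensors of the right shape.

```python
import torch

q = torch.randn(1, 8, 6, 64)   # (batch, num_heads, seq_len, head_dim)
k = torch.randn(1, 2, 6, 64)   # fewer KV heads, as with GQA

pos = torch.arange(6, dtype=torch.float32)
inv_freq = 1.0 / (10_000 ** (torch.arange(0, 64, 2, dtype=torch.float32) / 64))
freqs = torch.outer(pos, inv_freq)           # (seq_len, head_dim / 2)
emb = torch.cat((freqs, freqs), dim=-1)      # (seq_len, head_dim)
cos, sin = emb.cos()[None], emb.sin()[None]  # (batch, seq_len, head_dim)

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # unsqueeze_dim=1 broadcasts over the heads dim
print(q_rot.shape, k_rot.shape)  # torch.Size([1, 8, 6, 64]) torch.Size([1, 2, 6, 64])
```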
124
+
125
+
126
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
127
+ """
128
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
129
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
130
+ """
131
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
132
+ if n_rep == 1:
133
+ return hidden_states
134
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
135
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
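The docstring above claims equivalence with `torch.repeat_interleave`; a short check with arbitrary shapes:

```python
import torch

kv = torch.randn(2, 4, 5, 8)                        # (batch, num_kv_heads, seq_len, head_dim)
out = repeat_kv(kv, n_rep=3)                        # -> (2, 12, 5, 8)
ref = torch.repeat_interleave(kv, repeats=3, dim=1)
print(out.shape, torch.equal(out, ref))             # torch.Size([2, 12, 5, 8]) True
```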
136
+
137
+
138
+ def eager_attention_forward(
139
+ module: nn.Module,
140
+ query: torch.Tensor,
141
+ key: torch.Tensor,
142
+ value: torch.Tensor,
143
+ attention_mask: Optional[torch.Tensor],
144
+ scaling: float,
145
+ dropout: float = 0.0,
146
+ **kwargs,
147
+ ):
148
+ key_states = repeat_kv(key, module.num_key_value_groups)
149
+ value_states = repeat_kv(value, module.num_key_value_groups)
150
+
151
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
152
+ if attention_mask is not None:
153
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
154
+ attn_weights = attn_weights + causal_mask
155
+
156
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
157
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
158
+ attn_output = torch.matmul(attn_weights, value_states)
159
+ attn_output = attn_output.transpose(1, 2).contiguous()
160
+
161
+ return attn_output, attn_weights
162
+
163
+
164
+ class Emu3Attention(nn.Module):
165
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
166
+
167
+ def __init__(self, config: Emu3Config, layer_idx: int):
168
+ super().__init__()
169
+ self.config = config
170
+ self.layer_idx = layer_idx
171
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
172
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
173
+ self.scaling = self.head_dim**-0.5
174
+ self.attention_dropout = config.attention_dropout
175
+ self.is_causal = True
176
+
177
+ self.q_proj = nn.Linear(
178
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
179
+ )
180
+ self.k_proj = nn.Linear(
181
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
182
+ )
183
+ self.v_proj = nn.Linear(
184
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
185
+ )
186
+ self.o_proj = nn.Linear(
187
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
188
+ )
189
+
190
+ def forward(
191
+ self,
192
+ hidden_states: torch.Tensor,
193
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
194
+ attention_mask: Optional[torch.Tensor],
195
+ past_key_value: Optional[Cache] = None,
196
+ cache_position: Optional[torch.LongTensor] = None,
197
+ **kwargs: Unpack[FlashAttentionKwargs],
198
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
199
+ input_shape = hidden_states.shape[:-1]
200
+ hidden_shape = (*input_shape, -1, self.head_dim)
201
+
202
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
203
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
204
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
205
+
206
+ cos, sin = position_embeddings
207
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
208
+
209
+ if past_key_value is not None:
210
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
211
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
212
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
213
+
214
+ attention_interface: Callable = eager_attention_forward
215
+ if self.config._attn_implementation != "eager":
216
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
217
+ logger.warning_once(
218
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
219
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
220
+ )
221
+ else:
222
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
223
+
224
+ attn_output, attn_weights = attention_interface(
225
+ self,
226
+ query_states,
227
+ key_states,
228
+ value_states,
229
+ attention_mask,
230
+ dropout=0.0 if not self.training else self.attention_dropout,
231
+ scaling=self.scaling,
232
+ **kwargs,
233
+ )
234
+
235
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
236
+ attn_output = self.o_proj(attn_output)
237
+ return attn_output, attn_weights
238
+
239
+
240
+ class Emu3DecoderLayer(nn.Module):
241
+ def __init__(self, config: Emu3Config, layer_idx: int):
242
+ super().__init__()
243
+ self.hidden_size = config.hidden_size
244
+
245
+ self.self_attn = Emu3Attention(config=config, layer_idx=layer_idx)
246
+
247
+ self.mlp = Emu3MLP(config)
248
+ self.input_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
249
+ self.post_attention_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
250
+ self.dropout = nn.Dropout(config.attention_dropout)
251
+
252
+ def forward(
253
+ self,
254
+ hidden_states: torch.Tensor,
255
+ attention_mask: Optional[torch.Tensor] = None,
256
+ position_ids: Optional[torch.LongTensor] = None,
257
+ past_key_value: Optional[Cache] = None,
258
+ output_attentions: Optional[bool] = False,
259
+ use_cache: Optional[bool] = False,
260
+ cache_position: Optional[torch.LongTensor] = None,
261
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
262
+ **kwargs,
263
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
264
+ """
265
+ Args:
266
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
267
+ attention_mask (`torch.FloatTensor`, *optional*):
268
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
269
+ query_sequence_length, key_sequence_length)` if default attention is used.
270
+ output_attentions (`bool`, *optional*):
271
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
272
+ returned tensors for more detail.
273
+ use_cache (`bool`, *optional*):
274
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
275
+ (see `past_key_values`).
276
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
277
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
278
+ Indices depicting the position of the input sequence tokens in the sequence
279
+ kwargs (`dict`, *optional*):
280
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
281
+ into the model
282
+ """
283
+ residual = hidden_states
284
+
285
+ hidden_states = self.input_layernorm(hidden_states)
286
+
287
+ # Self Attention
288
+ hidden_states, self_attn_weights = self.self_attn(
289
+ hidden_states=hidden_states,
290
+ attention_mask=attention_mask,
291
+ position_ids=position_ids,
292
+ past_key_value=past_key_value,
293
+ output_attentions=output_attentions,
294
+ use_cache=use_cache,
295
+ cache_position=cache_position,
296
+ position_embeddings=position_embeddings,
297
+ **kwargs,
298
+ )
299
+ hidden_states = residual + self.dropout(hidden_states)
300
+
301
+ # Fully Connected
302
+ residual = hidden_states
303
+ hidden_states = self.post_attention_layernorm(hidden_states)
304
+ hidden_states = self.mlp(hidden_states)
305
+ hidden_states = residual + self.dropout(hidden_states)
306
+
307
+ outputs = (hidden_states,)
308
+
309
+ if output_attentions:
310
+ outputs += (self_attn_weights,)
311
+
312
+ return outputs
313
+
314
+
315
+ class Emu3VQVAEVectorQuantizer(nn.Module):
316
+ """
317
+ A module for vector quantization using learned embedding vectors.
318
+
319
+ This module implements the quantization process similar to the one described in
320
+ the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
321
+ input vectors into discrete codebook vectors, which are learned during training.
322
+ Current implementation improves over previous ones by avoiding costly matrix multiplications
323
+ and allowing for post-hoc remapping of indices.
324
+ """
325
+
326
+ def __init__(self, config: Emu3VQVAEConfig):
327
+ super().__init__()
328
+ self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
329
+ self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
330
+
331
+ def forward(self, hidden_state: torch.Tensor):
332
+ batch_size, temporal, channels, height, width = hidden_state.shape
333
+ hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
334
+ hidden_state_flattened = hidden_state.view(-1, channels)
335
+
336
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
337
+ hidden_state_sum = torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
338
+ embedding_sum = torch.sum(self.embedding.weight**2, dim=1)
339
+
340
+ # "bd,dn->bn",
341
+ distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
342
+ distances = hidden_state_sum + embedding_sum - distances
343
+
344
+ min_encoding_indices = torch.argmin(distances, dim=1)
345
+ min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
346
+ return min_encoding_indices
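The forward pass above finds, for each latent vector z, the nearest codebook entry by expanding ||z - e||^2 = ||z||^2 + ||e||^2 - 2 z·e instead of materializing all pairwise differences. A small sketch verifying that expansion against a direct pairwise-distance computation, with random tensors and illustrative sizes:

```python
import torch

z = torch.randn(10, 4)                        # flattened latents, embed_dim = 4
codebook = torch.randn(32, 4)                 # 32 learned codes

z2 = (z ** 2).sum(dim=1, keepdim=True)        # (10, 1)
e2 = (codebook ** 2).sum(dim=1)               # (32,)
dist = z2 + e2 - 2 * z @ codebook.t()         # ||z - e||^2, shape (10, 32)
idx = dist.argmin(dim=1)

ref = torch.cdist(z, codebook).argmin(dim=1)  # direct pairwise distances
print(torch.equal(idx, ref))                  # True, barring floating-point ties
```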
347
+
348
+
349
+ class Emu3VQVAEEncoderConvDownsample(nn.Module):
350
+ def __init__(self, in_channels):
351
+ super().__init__()
352
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
353
+
354
+ def forward(self, hidden_states):
355
+ # no asymmetric padding in torch conv, must do it ourselves
356
+ hidden_states = F.pad(hidden_states, pad=(0, 1, 0, 1), mode="constant", value=0)
357
+ hidden_states = self.conv(hidden_states)
358
+ return hidden_states
359
+
360
+
361
+ class Emu3VQVAEEncoderConvUpsample(nn.Module):
362
+ def __init__(self, in_channels):
363
+ super().__init__()
364
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
365
+
366
+ def forward(self, hidden_states):
367
+ hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
368
+ hidden_states = self.conv(hidden_states)
369
+ return hidden_states
370
+
371
+
372
+ class Emu3VQVAEConv3d(nn.Module):
373
+ def __init__(
374
+ self,
375
+ in_channel: int,
376
+ out_channel: int,
377
+ kernel_size: Tuple[int],
378
+ stride: Tuple[int],
379
+ ):
380
+ super().__init__()
381
+
382
+ padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
383
+ self.padding = ()
384
+ for pad_size in padding_sizes[::-1]:
385
+ self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
386
+ self.padding += (2, 0)
387
+
388
+ self.conv = nn.Conv3d(
389
+ in_channel,
390
+ out_channel,
391
+ kernel_size,
392
+ stride=stride,
393
+ )
394
+
395
+ def forward(self, hidden_states: torch.Tensor):
396
+ hidden_states = F.pad(hidden_states, self.padding)
397
+ hidden_states = self.conv(hidden_states)
398
+ return hidden_states
399
+
400
+
401
+ class Emu3VQVAESpatialNorm(nn.Module):
402
+ def __init__(
403
+ self,
404
+ in_channels: int,
405
+ out_channels: int,
406
+ ):
407
+ super().__init__()
408
+ self.norm_layer = nn.GroupNorm(
409
+ num_channels=out_channels,
410
+ num_groups=32,
411
+ eps=1e-6,
412
+ affine=True,
413
+ )
414
+
415
+ self.conv_y = nn.Conv2d(
416
+ in_channels,
417
+ out_channels,
418
+ kernel_size=1,
419
+ stride=1,
420
+ padding=0,
421
+ )
422
+ self.conv_b = nn.Conv2d(
423
+ in_channels,
424
+ out_channels,
425
+ kernel_size=1,
426
+ stride=1,
427
+ padding=0,
428
+ )
429
+
430
+ def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
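+ # resize the quantized latents to the feature-map resolution and use them to scale and shift the group-normalized activations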
431
+ quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode="nearest")
432
+ hidden_states = self.norm_layer(hidden_states)
433
+ hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
434
+ return hidden_states
435
+
436
+
437
+ class Emu3VQVAETemporalUpsample(nn.Module):
438
+ def __init__(
439
+ self,
440
+ in_channel: int,
441
+ out_channel: int,
442
+ ):
443
+ super().__init__()
444
+ self.conv = Emu3VQVAEConv3d(
445
+ in_channel,
446
+ out_channel,
447
+ kernel_size=(3, 3, 3),
448
+ stride=(1, 1, 1),
449
+ )
450
+
451
+ def forward(self, hidden_states: torch.Tensor):
452
+ batch_size, channels, temporal, height, width = hidden_states.shape
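+ # fold the spatial dims into the channel dim so interpolate only upsamples the temporal axis, then restore the layout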
453
+ hidden_states = hidden_states.permute(0, 1, 3, 4, 2).contiguous().view(batch_size, -1, temporal)
454
+ hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
455
+ hidden_states = hidden_states.view(batch_size, channels, height, width, -1).permute(0, 1, 4, 2, 3).contiguous()
456
+ hidden_states = self.conv(hidden_states)
457
+ return hidden_states
458
+
459
+
460
+ class Emu3VQVAETemporalDownsample(nn.Module):
461
+ def __init__(
462
+ self,
463
+ in_channel: int,
464
+ out_channel: int,
465
+ ):
466
+ super().__init__()
467
+ self.conv = Emu3VQVAEConv3d(
468
+ in_channel,
469
+ out_channel,
470
+ kernel_size=(4, 3, 3),
471
+ stride=(2, 1, 1),
472
+ )
473
+
474
+ def forward(self, hidden_states: torch.Tensor):
475
+ hidden_states = self.conv(hidden_states)
476
+ return hidden_states
477
+
478
+
479
+ class Emu3VQVAETemporalResnetBlock(nn.Module):
480
+ def __init__(
481
+ self,
482
+ in_channels,
483
+ out_channels=None,
484
+ ):
485
+ super().__init__()
486
+ self.in_channels = in_channels
487
+ self.out_channels = in_channels if out_channels is None else out_channels
488
+
489
+ self.norm1 = nn.BatchNorm3d(in_channels)
490
+ self.conv1 = Emu3VQVAEConv3d(
491
+ in_channels,
492
+ out_channels,
493
+ kernel_size=(3, 3, 3),
494
+ stride=(1, 1, 1),
495
+ )
496
+ self.norm2 = nn.BatchNorm3d(out_channels)
497
+ self.conv2 = Emu3VQVAEConv3d(
498
+ out_channels,
499
+ out_channels,
500
+ kernel_size=(3, 3, 3),
501
+ stride=(1, 1, 1),
502
+ )
503
+ if self.in_channels != self.out_channels:
504
+ self.nin_shortcut = nn.Conv3d(
505
+ in_channels,
506
+ out_channels,
507
+ kernel_size=1,
508
+ stride=1,
509
+ padding=0,
510
+ )
511
+
512
+ def forward(self, hidden_states):
513
+ residual = hidden_states
514
+ hidden_states = self.norm1(hidden_states)
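+ # x * sigmoid(x) is the SiLU (swish) activation, computed in place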
515
+ hidden_states *= torch.sigmoid(hidden_states)
516
+ hidden_states = self.conv1(hidden_states)
517
+
518
+ hidden_states = self.norm2(hidden_states)
519
+ hidden_states *= torch.sigmoid(hidden_states)
520
+ hidden_states = self.conv2(hidden_states)
521
+
522
+ if self.in_channels != self.out_channels:
523
+ residual = self.nin_shortcut(residual)
524
+
525
+ return residual + hidden_states
526
+
527
+
528
+ class Emu3VQVAEResnetBlock(nn.Module):
529
+ def __init__(
530
+ self,
531
+ in_channels: int,
532
+ out_channels: Optional[int] = None,
533
+ quant_channels: Optional[int] = None,
534
+ ):
535
+ super().__init__()
536
+ self.in_channels = in_channels
537
+ out_channels = in_channels if out_channels is None else out_channels
538
+ self.out_channels = out_channels
539
+ self.quant_channels = quant_channels
540
+
541
+ if quant_channels is None:
542
+ self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
543
+ self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-6, affine=True)
544
+ else:
545
+ self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
546
+ self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
547
+
548
+ self.conv1 = nn.Conv2d(
549
+ in_channels,
550
+ out_channels,
551
+ kernel_size=3,
552
+ stride=1,
553
+ padding=1,
554
+ )
555
+
556
+ self.conv2 = nn.Conv2d(
557
+ out_channels,
558
+ out_channels,
559
+ kernel_size=3,
560
+ stride=1,
561
+ padding=1,
562
+ )
563
+
564
+ if self.in_channels != self.out_channels:
565
+ self.nin_shortcut = nn.Conv2d(
566
+ in_channels,
567
+ out_channels,
568
+ kernel_size=1,
569
+ stride=1,
570
+ padding=0,
571
+ )
572
+
573
+ def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor] = None):
574
+ norm_args = () if self.quant_channels is None else (quant_channels,)
575
+
576
+ residual = hidden_states
577
+ hidden_states = self.norm1(hidden_states, *norm_args)
578
+ hidden_states *= torch.sigmoid(hidden_states)
579
+ hidden_states = self.conv1(hidden_states)
580
+
581
+ hidden_states = self.norm2(hidden_states, *norm_args)
582
+ hidden_states *= torch.sigmoid(hidden_states)
583
+ hidden_states = self.conv2(hidden_states)
584
+
585
+ if self.in_channels != self.out_channels:
586
+ residual = self.nin_shortcut(residual)
587
+
588
+ return residual + hidden_states
589
+
590
+
591
+ class Emu3VQVAEAttentionBlock(nn.Module):
592
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
593
+
594
+ def __init__(self, config):
595
+ super().__init__()
596
+ self.config = config
597
+ self.embed_dim = config.hidden_size
598
+ self.num_heads = config.num_attention_heads
599
+ self.head_dim = self.embed_dim // self.num_heads
600
+ if self.head_dim * self.num_heads != self.embed_dim:
601
+ raise ValueError(
602
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
603
+ f" {self.num_heads})."
604
+ )
605
+ self.scale = self.head_dim**-0.5
606
+ self.dropout = config.attention_dropout
607
+
608
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
609
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
610
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
611
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
612
+
613
+ def forward(
614
+ self,
615
+ hidden_states: torch.Tensor,
616
+ attention_mask: Optional[torch.Tensor] = None,
617
+ output_attentions: Optional[bool] = False,
618
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
619
+ """Input shape: Batch x Time x Channel"""
620
+
621
+ batch_size, q_len, _ = hidden_states.size()
622
+
623
+ query_states = self.q_proj(hidden_states)
624
+ key_states = self.k_proj(hidden_states)
625
+ value_states = self.v_proj(hidden_states)
626
+
627
+ query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
628
+ key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
629
+ value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
630
+
631
+ k_v_seq_len = key_states.shape[-2]
632
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale
633
+
634
+ if attn_weights.size() != (batch_size, self.num_heads, q_len, k_v_seq_len):
635
+ raise ValueError(
636
+ f"Attention weights should be of size {(batch_size, self.num_heads, q_len, k_v_seq_len)}, but is"
637
+ f" {attn_weights.size()}"
638
+ )
639
+
640
+ if attention_mask is not None:
641
+ if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
642
+ raise ValueError(
643
+ f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}"
644
+ )
645
+ attn_weights = attn_weights + attention_mask
646
+
647
+ # upcast attention to fp32
648
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
649
+ attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
650
+ attn_output = torch.matmul(attn_weights, value_states)
651
+
652
+ if attn_output.size() != (batch_size, self.num_heads, q_len, self.head_dim):
653
+ raise ValueError(
654
+ f"`attn_output` should be of size {(batch_size, self.num_heads, q_len, self.head_dim)}, but is"
655
+ f" {attn_output.size()}"
656
+ )
657
+
658
+ attn_output = attn_output.transpose(1, 2).contiguous()
659
+ attn_output = attn_output.reshape(batch_size, q_len, self.embed_dim)
660
+
661
+ attn_output = self.out_proj(attn_output)
662
+
663
+ return attn_output, attn_weights
664
+
665
+
666
+ class Emu3VQVAEGroupNorm(nn.GroupNorm):
667
+ """
668
+ Same as the torch GroupNorm with the only difference that this one accepts
669
+ an optional kwarg `quant_states` which is not used. This class makes it easier to
670
+ use SpatialNorm or GroupNorm without conditionals
671
+ """
672
+
673
+ def __init__(self, **kwargs):
674
+ super().__init__(**kwargs)
675
+
676
+ def forward(self, input, quant_states=None):
677
+ return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
678
+
679
+
680
+ class Emu3VQVAEMiddleBlock(nn.Module):
681
+ def __init__(self, config, in_channels, quant_channels=None):
682
+ super().__init__()
683
+
684
+ self.block_1 = Emu3VQVAEResnetBlock(
685
+ in_channels=in_channels,
686
+ out_channels=in_channels,
687
+ quant_channels=quant_channels,
688
+ )
689
+ self.attn_1 = Emu3VQVAEAttentionBlock(config)
690
+ if quant_channels is None:
691
+ self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
692
+ else:
693
+ self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
694
+
695
+ self.block_2 = Emu3VQVAEResnetBlock(
696
+ in_channels=in_channels,
697
+ out_channels=in_channels,
698
+ quant_channels=quant_channels,
699
+ )
700
+
701
+ def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor = None):
702
+ hidden_states = self.block_1(hidden_states, quant_states)
703
+ residual = hidden_states
704
+ hidden_states = self.attn_norm(hidden_states, quant_states)
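+ # flatten the spatial grid so every position becomes a token for self-attention, then restore (batch, channels, height, width)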
705
+ batch_size, channels, height, width = hidden_states.shape
706
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
707
+ hidden_states = self.attn_1(hidden_states)[0]
708
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
709
+ hidden_states = residual + hidden_states
710
+ hidden_states = self.block_2(hidden_states, quant_states)
711
+ return hidden_states
712
+
713
+
714
+ class Emu3VQVAEDownBlock(nn.Module):
715
+ def __init__(self, config):
716
+ super().__init__()
717
+
718
+ self.num_resolutions = len(config.channel_multiplier)
719
+ self.num_res_blocks = config.num_res_blocks
720
+ base_channels = config.base_channels
721
+ channel_multiplier = config.channel_multiplier
722
+
723
+ in_channel_multiplier = (1,) + tuple(channel_multiplier)
724
+ self.in_channel_multiplier = in_channel_multiplier
725
+ self.down = nn.ModuleList()
726
+ for i_level in range(self.num_resolutions):
727
+ block = nn.ModuleList()
728
+ attn = nn.ModuleList()
729
+ attn_norms = nn.ModuleList()
730
+ block_in = base_channels * in_channel_multiplier[i_level]
731
+ block_out = base_channels * channel_multiplier[i_level]
732
+ for i_block in range(self.num_res_blocks):
733
+ block.append(
734
+ Emu3VQVAEResnetBlock(
735
+ in_channels=block_in,
736
+ out_channels=block_out,
737
+ )
738
+ )
739
+ block_in = block_out
740
+ if config.attn_resolutions is not None and i_level in config.attn_resolutions:
741
+ attn.append(Emu3VQVAEAttentionBlock(config))
742
+ attn_norms.append(nn.GroupNorm(num_channels=block_in, num_groups=32, eps=1e-6, affine=True))
743
+
744
+ down = nn.Module()
745
+ down.block = block
746
+ down.attn = attn
747
+ down.attn_norms = attn_norms
748
+ if i_level != self.num_resolutions - 1:
749
+ down.downsample = Emu3VQVAEEncoderConvDownsample(block_in)
750
+ self.down.append(down)
751
+
752
+ def forward(self, hidden_states: torch.FloatTensor):
753
+ for i_level, blocks in enumerate(self.down):
754
+ for i_block in range(self.num_res_blocks):
755
+ hidden_states = blocks.block[i_block](hidden_states)
756
+ if len(blocks.attn) > 0:
757
+ residual = hidden_states
758
+ hidden_states = blocks.attn_norms[i_block](hidden_states)
759
+
760
+ batch_size, channels, height, width = hidden_states.shape
761
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
762
+ hidden_states = blocks.attn[i_block](hidden_states)[0]
763
+
764
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
765
+ hidden_states = residual + hidden_states
766
+
767
+ if i_level != self.num_resolutions - 1:
768
+ hidden_states = blocks.downsample(hidden_states)
769
+
770
+ return hidden_states
771
+
772
+
773
+ class Emu3VQVAEUpBlock(nn.Module):
774
+ def __init__(self, config):
775
+ super().__init__()
776
+
777
+ self.num_resolutions = len(config.channel_multiplier)
778
+ self.num_res_blocks = config.num_res_blocks
779
+
780
+ quant_channels = config.embed_dim
781
+ block_in = config.base_channels * config.channel_multiplier[-1]
782
+
783
+ self.up = nn.ModuleList()
784
+ for i_level in reversed(range(self.num_resolutions)):
785
+ block = nn.ModuleList()
786
+ attn = nn.ModuleList()
787
+ attn_norms = nn.ModuleList()
788
+ block_out = config.base_channels * config.channel_multiplier[i_level]
789
+ for i_block in range(self.num_res_blocks + 1):
790
+ block.append(
791
+ Emu3VQVAEResnetBlock(
792
+ in_channels=block_in,
793
+ out_channels=block_out,
794
+ quant_channels=quant_channels,
795
+ )
796
+ )
797
+ block_in = block_out
798
+ if i_level in config.attn_resolutions:
799
+ attn.append(Emu3VQVAEAttentionBlock(config))
800
+ attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
801
+
802
+ up = nn.Module()
803
+ up.block = block
804
+ up.attn = attn
805
+ up.attn_norms = attn_norms
806
+ if i_level != 0:
807
+ up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
808
+
809
+ self.up.insert(0, up)
810
+
811
+ def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
812
+ for i_level, blocks in enumerate(self.up[::-1]):
813
+ for i_block in range(self.num_res_blocks + 1):
814
+ hidden_states = blocks.block[i_block](hidden_states, quant_states)
815
+ if len(blocks.attn) > 0:
816
+ residual = hidden_states
817
+ hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
818
+
819
+ batch_size, channels, height, width = hidden_states.shape
820
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
821
+ hidden_states = blocks.attn[i_block](hidden_states)[0]
822
+
823
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
824
+ hidden_states = residual + hidden_states
825
+ if i_level != len(self.up) - 1:
826
+ hidden_states = blocks.upsample(hidden_states)
827
+
828
+ return hidden_states
829
+
830
+
831
+ class Emu3VQVAEEncoder(nn.Module):
832
+ def __init__(self, config):
833
+ super().__init__()
834
+
835
+ base_channels = config.base_channels
836
+ in_channels = config.in_channels
837
+ double_latent = config.double_latent
838
+ latent_channels = config.latent_channels
839
+ channel_multiplier = config.channel_multiplier
840
+ out_channels = 2 * latent_channels if double_latent else latent_channels
841
+ block_in = base_channels * channel_multiplier[-1]
842
+
843
+ self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
844
+ self.down_block = Emu3VQVAEDownBlock(config)
845
+ self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
846
+
847
+ self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
848
+ self.conv_out = torch.nn.Conv2d(
849
+ block_in,
850
+ out_channels,
851
+ kernel_size=3,
852
+ stride=1,
853
+ padding=1,
854
+ )
855
+
856
+ temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
857
+ self.time_conv = nn.ModuleList()
858
+ self.time_res_stack = nn.ModuleList()
859
+
860
+ for i in range(temporal_down_blocks):
861
+ conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
862
+ self.time_conv.append(conv)
863
+
864
+ for _ in range(config.num_res_blocks):
865
+ time_res_conv = Emu3VQVAETemporalResnetBlock(
866
+ in_channels=out_channels,
867
+ out_channels=out_channels,
868
+ )
869
+ self.time_res_stack.append(time_res_conv)
870
+
871
+ def forward(self, pixel_values: torch.LongTensor):
872
+ temporal_dim = pixel_values.shape[1]
873
+ pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
874
+
875
+ # downsampling & middle
876
+ hidden_states = self.conv_in(pixel_values)
877
+ hidden_states = self.down_block(hidden_states)
878
+ hidden_states = self.middle_block(hidden_states)
879
+
880
+ # end
881
+ hidden_states = self.norm_out(hidden_states)
882
+ hidden_states *= torch.sigmoid(hidden_states)
883
+ hidden_states = self.conv_out(hidden_states)
884
+
885
+ hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
886
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
887
+
888
+ # temporal convs
889
+ for conv in self.time_conv:
890
+ hidden_states = conv(hidden_states)
891
+ hidden_states *= torch.sigmoid(hidden_states)
892
+
893
+ for layer in self.time_res_stack:
894
+ hidden_states = layer(hidden_states)
895
+
896
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
897
+
898
+ return hidden_states
899
+
900
+
901
+ class Emu3VQVAEDecoder(nn.Module):
902
+ def __init__(self, config: Emu3VQVAEConfig):
903
+ super().__init__()
904
+
905
+ quant_channels = config.embed_dim
906
+ block_in = config.base_channels * config.channel_multiplier[-1]
907
+ self.time_res_stack = nn.ModuleList()
908
+ for _ in range(config.num_res_blocks):
909
+ time_res_conv = Emu3VQVAETemporalResnetBlock(
910
+ in_channels=config.latent_channels, out_channels=config.latent_channels
911
+ )
912
+ self.time_res_stack.append(time_res_conv)
913
+
914
+ temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
915
+ self.time_conv = nn.ModuleList()
916
+ for i in range(temp_upsample_block_num):
917
+ conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
918
+ self.time_conv.append(conv)
919
+
920
+ self.conv_in = nn.Conv2d(
921
+ config.latent_channels,
922
+ block_in,
923
+ kernel_size=3,
924
+ stride=1,
925
+ padding=1,
926
+ )
927
+
928
+ self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
929
+ self.up_block = Emu3VQVAEUpBlock(config)
930
+
931
+ block_in = config.base_channels * config.channel_multiplier[0]
932
+ self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
933
+ self.conv_out = nn.Conv2d(
934
+ block_in,
935
+ config.out_channels,
936
+ kernel_size=3,
937
+ stride=1,
938
+ padding=1,
939
+ )
940
+
941
+ def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
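+ # concatenate post-quant and quant states along the batch dim so both share one pass through the temporal stack; they are split again below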
942
+ hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
943
+ hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
944
+
945
+ # temporal convs
946
+ for layer in self.time_res_stack:
947
+ hidden_quant_states = layer(hidden_quant_states)
948
+
949
+ for layer in self.time_conv:
950
+ hidden_quant_states = layer(hidden_quant_states)
951
+ hidden_quant_states *= torch.sigmoid(hidden_quant_states)
952
+
953
+ hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
954
+ hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
955
+ hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
956
+ quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
957
+
958
+ hidden_states = self.conv_in(hidden_states)
959
+
960
+ # middle & upsampling
961
+ hidden_states = self.middle_block(hidden_states, quant_states)
962
+ hidden_states = self.up_block(hidden_states, quant_states)
963
+
964
+ hidden_states = self.norm_out(hidden_states, quant_states)
965
+ hidden_states *= torch.sigmoid(hidden_states)
966
+ hidden_states = self.conv_out(hidden_states)
967
+
968
+ return hidden_states
969
+
970
+
971
+ EMU3_VQ_START_DOCSTRING = r"""
972
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
973
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
974
+ etc.)
975
+
976
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
977
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
978
+ and behavior.
979
+
980
+ Parameters:
981
+ config ([`Emu3VQVAEConfig`]):
982
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
983
+ load the weights associated with the model, only the configuration. Check out the
984
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
985
+ """
986
+
987
+
988
+ @add_start_docstrings(
989
+ """The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.
990
+ This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from
991
+ [Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman](https://arxiv.org/abs/2203.13131).
992
+ """,
993
+ EMU3_VQ_START_DOCSTRING,
994
+ )
995
+ class Emu3VQVAE(PreTrainedModel):
996
+ config_class = Emu3VQVAEConfig
997
+ base_model_prefix = "emuvideovq"
998
+ main_input_name = "pixel_values"
999
+ _no_split_modules = [
1000
+ "Emu3VQVAETemporalResnetBlock",
1001
+ "Emu3VQVAEAttentionBlock",
1002
+ "Emu3VQVAEResnetBlock",
1003
+ "Emu3VQVAEVectorQuantizer",
1004
+ ]
1005
+
1006
+ def _init_weights(self, module):
1007
+ if isinstance(module, (nn.Conv2d, nn.Conv3d)):
1008
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
1009
+ elif isinstance(module, nn.Linear):
1010
+ nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
1011
+ if module.bias is not None:
1012
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
1013
+ bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
1014
+ nn.init.uniform_(module.bias, -bound, bound)
1015
+ elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
1016
+ nn.init.constant_(module.weight, 1)
1017
+ nn.init.constant_(module.bias, 0)
1018
+
1019
+ def __init__(self, config: Emu3VQVAEConfig):
1020
+ super().__init__(config)
1021
+
1022
+ self.config = config
1023
+
1024
+ self.encoder = Emu3VQVAEEncoder(config)
1025
+ self.decoder = Emu3VQVAEDecoder(config)
1026
+ self.quantize = Emu3VQVAEVectorQuantizer(config)
1027
+ self.vision_spatial_factor = 2 ** (len(config.channel_multiplier) - 1)
1028
+
1029
+ self.quant_conv = Emu3VQVAEConv3d(
1030
+ config.latent_channels, config.embed_dim, kernel_size=(3, 1, 1), stride=(1, 1, 1)
1031
+ )
1032
+ self.post_quant_conv = Emu3VQVAEConv3d(
1033
+ config.embed_dim, config.latent_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1)
1034
+ )
1035
+ self.spatial_scale_factor = 2 ** (len(config.channel_multiplier) - 1)
1036
+ self.eval() # Emu3's VQ model is frozen
1037
+
1038
+ self.post_init()
1039
+
1040
+ def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
1041
+ is_image = pixel_values.ndim == 4
1042
+ if is_image:
1043
+ temporal = self.config.temporal_downsample_factor
1044
+ batch_size, channels, height, width = pixel_values.shape
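+ # a single image is repeated temporal_downsample_factor times so the temporal downsampling collapses it back to one frame of tokens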
1045
+ pixel_values = pixel_values.unsqueeze(1).repeat(1, temporal, 1, 1, 1)
1046
+ else:
1047
+ batch_size, temporal, channels, height, width = pixel_values.shape
1048
+
1049
+ hidden_states = self.encoder(pixel_values)
1050
+
1051
+ # b t c h w -> b c t h w
1052
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
1053
+ hidden_states = self.quant_conv(hidden_states)
1054
+
1055
+ # b c t h w -> b t c h w
1056
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
1057
+ codes = self.quantize(hidden_states)
1058
+
1059
+ image_tokens = codes.squeeze(1) if is_image else codes
1060
+
1061
+ image_tokens = [
1062
+ single_image[: int(size[0] / self.vision_spatial_factor), : int(size[1] / self.vision_spatial_factor)]
1063
+ for single_image, size in zip(image_tokens, image_sizes)
1064
+ ]
1065
+
1066
+ return image_tokens
1067
+
1068
+ def decode(self, hidden_states: torch.Tensor):
1069
+ is_image = hidden_states.ndim == 3
1070
+ if is_image:
1071
+ hidden_states = hidden_states.unsqueeze(1)
1072
+
1073
+ batch_size, temporal, height, width = hidden_states.shape
1074
+ quant = self.quantize.embedding(hidden_states.flatten())
1075
+
1076
+ channels = quant.shape[-1]
1077
+ quant = quant.view(batch_size, temporal, height, width, channels).permute(0, 4, 1, 2, 3).contiguous()
1078
+ post_quant = self.post_quant_conv(quant)
1079
+
1080
+ quant = quant.permute(0, 2, 1, 3, 4)
1081
+ post_quant = post_quant.permute(0, 2, 1, 3, 4)
1082
+
1083
+ video = self.decoder(post_quant, quant)
1084
+ video = video.reshape(
1085
+ batch_size,
1086
+ temporal * self.config.temporal_downsample_factor,
1087
+ self.config.out_channels,
1088
+ height * self.spatial_scale_factor,
1089
+ width * self.spatial_scale_factor,
1090
+ )
1091
+ return video[:, 0] if is_image else video
1092
+
1093
+
1094
+ class Emu3ImageVocabularyMapping:
1095
+ """
1096
+ A class for mapping discrete image tokens from VQGAN to BPE tokens.
1097
+ """
1098
+
1099
+ def __init__(self, vocab_map):
1100
+ self.vocab_map = vocab_map
1101
+ self.eol_token_id = vocab_map.get("<|extra_200|>")
1102
+ self.image_token_id = vocab_map.get("<image>")
1103
+
1104
+ @cached_property
1105
+ def image_tokens(self):
1106
+ return sorted([val for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
1107
+
1108
+ @cached_property
1109
+ def image_tokens_str(self):
1110
+ return sorted([name for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
1111
+
1112
+ @cached_property
1113
+ def img2bpe(self):
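+ # visual token names look like "<|visual token 012345|>", so the slice [-8:-2] extracts the 6-character numeric id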
1114
+ return {int(token[-8:-2]): self.vocab_map[token] for token in self.image_tokens_str}
1115
+
1116
+ @cached_property
1117
+ def bpe2img(self):
1118
+ return {v: k for k, v in self.img2bpe.items()}
1119
+
1120
+ @cached_property
1121
+ def bpe2img_mapping_tensor(self):
1122
+ mapping = torch.zeros(max(self.bpe2img.keys()) + 1, dtype=torch.int)
1123
+ for k, v in self.bpe2img.items():
1124
+ mapping[k] = v
1125
+ return mapping
1126
+
1127
+ @cached_property
1128
+ def img2bpe_mapping_tensor(self):
1129
+ mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
1130
+ for k, v in self.img2bpe.items():
1131
+ mapping[k] = v
1132
+ return mapping
1133
+
1134
+ def convert_img2bpe(self, img_batch: torch.Tensor) -> torch.Tensor:
1135
+ device = img_batch.device
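+ # append an end-of-line token to every row so the language model sees explicit row breaks in the image grid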
1136
+ eol_row = torch.ones((img_batch.shape[0], 1), dtype=torch.int) * self.eol_token_id
1137
+ img_tokens = self.img2bpe_mapping_tensor[img_batch.to("cpu")]
1138
+ img_tokens = torch.cat([img_tokens, eol_row], dim=-1)
1139
+ return img_tokens.to(device)
1140
+
1141
+ def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
1142
+ device = img_batch.device
1143
+ img_batch = img_batch[..., :-1] # remove last row of EOL tokens
1144
+ img_tokens = self.bpe2img_mapping_tensor[img_batch.to("cpu")]
1145
+ return img_tokens.to(device)
1146
+
1147
+
1148
+ EMU3_START_DOCSTRING = r"""
1149
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1150
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1151
+ etc.)
1152
+
1153
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1154
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
1155
+ and behavior.
1156
+
1157
+ Parameters:
1158
+ config ([`Emu3Config`]):
1159
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
1160
+ load the weights associated with the model, only the configuration. Check out the
1161
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1162
+ """
1163
+
1164
+
1165
+ @add_start_docstrings(
1166
+ "The bare emu3 Model outputting raw hidden-states without any specific head on top.",
1167
+ EMU3_START_DOCSTRING,
1168
+ )
1169
+ class Emu3PreTrainedModel(PreTrainedModel):
1170
+ config_class = Emu3Config
1171
+ base_model_prefix = "model"
1172
+ supports_gradient_checkpointing = True
1173
+ _no_split_modules = [
1174
+ "Emu3DecoderLayer",
1175
+ ]
1176
+ _skip_keys_device_placement = ["past_key_values", "causal_mask"]
1177
+ _supports_flash_attn_2 = True
1178
+ _supports_sdpa = True
1179
+ _supports_quantized_cache = True
1180
+ _supports_cache_class = True
1181
+ _supports_static_cache = True
1182
+ _supports_param_buffer_assignment = False
1183
+ _supports_flex_attn = True
1184
+
1185
+ def _init_weights(self, module):
1186
+ std = self.config.get_text_config().initializer_range
1187
+ if isinstance(module, Emu3VQVAE):
1188
+ module.apply(module._init_weights)
1189
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
1190
+ module.weight.data.normal_(mean=0.0, std=std)
1191
+ if module.bias is not None:
1192
+ module.bias.data.zero_()
1193
+ elif isinstance(module, nn.Embedding):
1194
+ module.weight.data.normal_(mean=0.0, std=std)
1195
+ if module.padding_idx is not None:
1196
+ module.weight.data[module.padding_idx].zero_()
1197
+
1198
+
1199
+ class Emu3RotaryEmbedding(nn.Module):
1200
+ def __init__(self, config: Emu3Config, device=None):
1201
+ super().__init__()
1202
+ # BC: "rope_type" was originally "type"
1203
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
1204
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
1205
+ else:
1206
+ self.rope_type = "default"
1207
+ self.max_seq_len_cached = config.max_position_embeddings
1208
+ self.original_max_seq_len = config.max_position_embeddings
1209
+
1210
+ self.config = config
1211
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
1212
+
1213
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
1214
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
1215
+ self.original_inv_freq = self.inv_freq
1216
+
1217
+ def _dynamic_frequency_update(self, position_ids, device):
1218
+ """
1219
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
1220
+ 1 - growing beyond the cached sequence length (allow scaling)
1221
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
1222
+ """
1223
+ seq_len = torch.max(position_ids) + 1
1224
+ if seq_len > self.max_seq_len_cached: # growth
1225
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
1226
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
1227
+ self.max_seq_len_cached = seq_len
1228
+
1229
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
1230
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
1231
+ self.max_seq_len_cached = self.original_max_seq_len
1232
+
1233
+ @torch.no_grad()
1234
+ def forward(self, x, position_ids):
1235
+ if "dynamic" in self.rope_type:
1236
+ self._dynamic_frequency_update(position_ids, device=x.device)
1237
+
1238
+ # Core RoPE block
1239
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
1240
+ position_ids_expanded = position_ids[:, None, :].float()
1241
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
1242
+ device_type = x.device.type
1243
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
1244
+ with torch.autocast(device_type=device_type, enabled=False):
1245
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
1246
+ emb = torch.cat((freqs, freqs), dim=-1)
1247
+ cos = emb.cos()
1248
+ sin = emb.sin()
1249
+
1250
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
1251
+ cos = cos * self.attention_scaling
1252
+ sin = sin * self.attention_scaling
1253
+
1254
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
1255
+
1256
+
1257
+ EMU3_INPUTS_DOCSTRING = r"""
1258
+ Args:
1259
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1260
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1261
+ it.
1262
+
1263
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1264
+ [`PreTrainedTokenizer.__call__`] for details.
1265
+
1266
+ [What are input IDs?](../glossary#input-ids)
1267
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1268
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1269
+
1270
+ - 1 for tokens that are **not masked**,
1271
+ - 0 for tokens that are **masked**.
1272
+
1273
+ [What are attention masks?](../glossary#attention-mask)
1274
+
1275
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1276
+ [`PreTrainedTokenizer.__call__`] for details.
1277
+
1278
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1279
+ `past_key_values`).
1280
+
1281
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1282
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1283
+ information on the default strategy.
1284
+
1285
+ - 1 indicates the head is **not masked**,
1286
+ - 0 indicates the head is **masked**.
1287
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1288
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1289
+ config.n_positions - 1]`.
1290
+
1291
+ [What are position IDs?](../glossary#position-ids)
1292
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1293
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1294
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1295
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1296
+
1297
+ Two formats are allowed:
1298
+ - a [`~cache_utils.Cache`] instance, see our
1299
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
1300
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1301
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1302
+ cache format.
1303
+
1304
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1305
+ legacy cache format will be returned.
1306
+
1307
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1308
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1309
+ of shape `(batch_size, sequence_length)`.
1310
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1311
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1312
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1313
+ model's internal embedding lookup matrix.
1314
+ use_cache (`bool`, *optional*):
1315
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1316
+ `past_key_values`).
1317
+ output_attentions (`bool`, *optional*):
1318
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1319
+ tensors for more detail.
1320
+ output_hidden_states (`bool`, *optional*):
1321
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1322
+ more detail.
1323
+ return_dict (`bool`, *optional*):
1324
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1325
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1326
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
1327
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1328
+ the complete sequence length.
1329
+ """
1330
+
1331
+
1332
+ @add_start_docstrings(
1333
+ "The bare Emu3Text Model outputting raw hidden-states without any specific head on top.",
1334
+ EMU3_START_DOCSTRING,
1335
+ )
1336
+ class Emu3TextModel(Emu3PreTrainedModel):
1337
+ """
1338
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is an [`Emu3DecoderLayer`]
1339
+
1340
+ Args:
1341
+ config: Emu3TextConfig
1342
+ """
1343
+
1344
+ def __init__(self, config: Emu3Config):
1345
+ super().__init__(config)
1346
+ self.padding_idx = config.pad_token_id
1347
+ self.vocab_size = config.vocab_size
1348
+
1349
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1350
+ self.layers = nn.ModuleList(
1351
+ [Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1352
+ )
1353
+ self.norm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1354
+ self.rotary_emb = Emu3RotaryEmbedding(config=config)
1355
+ self.gradient_checkpointing = False
1356
+
1357
+ # Initialize weights and apply final processing
1358
+ self.post_init()
1359
+
1360
+ def get_input_embeddings(self):
1361
+ return self.embed_tokens
1362
+
1363
+ def set_input_embeddings(self, value):
1364
+ self.embed_tokens = value
1365
+
1366
+ @add_start_docstrings_to_model_forward(EMU3_INPUTS_DOCSTRING)
1367
+ def forward(
1368
+ self,
1369
+ input_ids: torch.LongTensor = None,
1370
+ attention_mask: Optional[torch.Tensor] = None,
1371
+ position_ids: Optional[torch.LongTensor] = None,
1372
+ past_key_values: Optional[Cache] = None,
1373
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1374
+ use_cache: Optional[bool] = None,
1375
+ output_attentions: Optional[bool] = None,
1376
+ output_hidden_states: Optional[bool] = None,
1377
+ return_dict: Optional[bool] = None,
1378
+ cache_position: Optional[torch.LongTensor] = None,
1379
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
1380
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1381
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1382
+ output_hidden_states = (
1383
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1384
+ )
1385
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1386
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1387
+
1388
+ if (input_ids is None) ^ (inputs_embeds is not None):
1389
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1390
+
1391
+ if self.gradient_checkpointing and self.training and use_cache:
1392
+ logger.warning_once(
1393
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1394
+ )
1395
+ use_cache = False
1396
+
1397
+ if inputs_embeds is None:
1398
+ inputs_embeds = self.embed_tokens(input_ids)
1399
+
1400
+ if use_cache and past_key_values is None:
1401
+ past_key_values = DynamicCache()
1402
+
1403
+ if cache_position is None:
1404
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1405
+ cache_position = torch.arange(
1406
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1407
+ )
1408
+
1409
+ if position_ids is None:
1410
+ position_ids = cache_position.unsqueeze(0)
1411
+
1412
+ causal_mask = self._update_causal_mask(
1413
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
1414
+ )
1415
+
1416
+ hidden_states = inputs_embeds
1417
+
1418
+ # create position embeddings to be shared across the decoder layers
1419
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1420
+
1421
+ # decoder layers
1422
+ all_hidden_states = () if output_hidden_states else None
1423
+ all_self_attns = () if output_attentions else None
1424
+
1425
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
1426
+ if output_hidden_states:
1427
+ all_hidden_states += (hidden_states,)
1428
+
1429
+ if self.gradient_checkpointing and self.training:
1430
+ layer_outputs = self._gradient_checkpointing_func(
1431
+ decoder_layer.__call__,
1432
+ hidden_states,
1433
+ causal_mask,
1434
+ position_ids,
1435
+ past_key_values,
1436
+ output_attentions,
1437
+ use_cache,
1438
+ cache_position,
1439
+ position_embeddings,
1440
+ )
1441
+ else:
1442
+ layer_outputs = decoder_layer(
1443
+ hidden_states,
1444
+ attention_mask=causal_mask,
1445
+ position_ids=position_ids,
1446
+ past_key_value=past_key_values,
1447
+ output_attentions=output_attentions,
1448
+ use_cache=use_cache,
1449
+ cache_position=cache_position,
1450
+ position_embeddings=position_embeddings,
1451
+ **flash_attn_kwargs,
1452
+ )
1453
+
1454
+ hidden_states = layer_outputs[0]
1455
+
1456
+ if output_attentions:
1457
+ all_self_attns += (layer_outputs[1],)
1458
+
1459
+ hidden_states = self.norm(hidden_states)
1460
+
1461
+ # add hidden states from the last decoder layer
1462
+ if output_hidden_states:
1463
+ all_hidden_states += (hidden_states,)
1464
+
1465
+ output = BaseModelOutputWithPast(
1466
+ last_hidden_state=hidden_states,
1467
+ past_key_values=past_key_values if use_cache else None,
1468
+ hidden_states=all_hidden_states,
1469
+ attentions=all_self_attns,
1470
+ )
1471
+ return output if return_dict else output.to_tuple()
1472
+
1473
+ def _update_causal_mask(
1474
+ self,
1475
+ attention_mask: torch.Tensor,
1476
+ input_tensor: torch.Tensor,
1477
+ cache_position: torch.Tensor,
1478
+ past_key_values: Cache,
1479
+ output_attentions: bool,
1480
+ ):
1481
+ if self.config._attn_implementation == "flash_attention_2":
1482
+ if attention_mask is not None and (attention_mask == 0.0).any():
1483
+ return attention_mask
1484
+ return None
1485
+
1486
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1487
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1488
+ # to infer the attention mask.
1489
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1490
+ using_static_cache = isinstance(past_key_values, StaticCache)
1491
+
1492
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1493
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1494
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1495
+ attention_mask,
1496
+ inputs_embeds=input_tensor,
1497
+ past_key_values_length=past_seen_tokens,
1498
+ is_training=self.training,
1499
+ ):
1500
+ return None
1501
+
1502
+ dtype, device = input_tensor.dtype, input_tensor.device
1503
+ sequence_length = input_tensor.shape[1]
1504
+ if using_static_cache:
1505
+ target_length = past_key_values.get_max_cache_shape()
1506
+ else:
1507
+ target_length = (
1508
+ attention_mask.shape[-1]
1509
+ if isinstance(attention_mask, torch.Tensor)
1510
+ else past_seen_tokens + sequence_length + 1
1511
+ )
1512
+
1513
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1514
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
1515
+ attention_mask,
1516
+ sequence_length=sequence_length,
1517
+ target_length=target_length,
1518
+ dtype=dtype,
1519
+ device=device,
1520
+ cache_position=cache_position,
1521
+ batch_size=input_tensor.shape[0],
1522
+ )
1523
+
1524
+ if (
1525
+ self.config._attn_implementation == "sdpa"
1526
+ and attention_mask is not None
1527
+ and attention_mask.device.type == "cuda"
1528
+ and not output_attentions
1529
+ ):
1530
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1531
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1532
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1533
+ min_dtype = torch.finfo(dtype).min
1534
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1535
+
1536
+ return causal_mask
1537
+
1538
+ @staticmethod
1539
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1540
+ attention_mask: torch.Tensor,
1541
+ sequence_length: int,
1542
+ target_length: int,
1543
+ dtype: torch.dtype,
1544
+ device: torch.device,
1545
+ cache_position: torch.Tensor,
1546
+ batch_size: int,
1547
+ **kwargs,
1548
+ ):
1549
+ """
1550
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1551
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1552
+
1553
+ Args:
1554
+ attention_mask (`torch.Tensor`):
1555
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
1556
+ `(batch_size, 1, query_length, key_value_length)`.
1557
+ sequence_length (`int`):
1558
+ The sequence length being processed.
1559
+ target_length (`int`):
1560
+ The target length: when generating with static cache, the mask should be as long as the static cache,
1561
+ to account for the 0 padding, the part of the cache that is not filled yet.
1562
+ dtype (`torch.dtype`):
1563
+ The dtype to use for the 4D attention mask.
1564
+ device (`torch.device`):
1565
+ The device to place the 4D attention mask on.
1566
+ cache_position (`torch.Tensor`):
1567
+ Indices depicting the position of the input sequence tokens in the sequence.
1568
+ batch_size (`torch.Tensor`):
1569
+ Batch size.
1570
+ """
1571
+ if attention_mask is not None and attention_mask.dim() == 4:
1572
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1573
+ causal_mask = attention_mask
1574
+ else:
1575
+ min_dtype = torch.finfo(dtype).min
1576
+ causal_mask = torch.full(
1577
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
1578
+ )
1579
+ if sequence_length != 1:
1580
+ causal_mask = torch.triu(causal_mask, diagonal=1)
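+ # columns beyond each query's cache position keep min_dtype (masked); earlier and current positions are zeroed out (visible)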
1581
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1582
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
1583
+ if attention_mask is not None:
1584
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1585
+ mask_length = attention_mask.shape[-1]
1586
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1587
+ padding_mask = padding_mask == 0
1588
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1589
+ padding_mask, min_dtype
1590
+ )
1591
+
1592
+ return causal_mask
1593
+
1594
+
1595
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
1596
+
1597
+
1598
+ EMU3_TEXT_INPUTS_DOCSTRING = r"""
1599
+ Args:
1600
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1601
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1602
+ it.
1603
+
1604
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1605
+ [`PreTrainedTokenizer.__call__`] for details.
1606
+
1607
+ [What are input IDs?](../glossary#input-ids)
1608
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1609
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1610
+
1611
+ - 1 for tokens that are **not masked**,
1612
+ - 0 for tokens that are **masked**.
1613
+
1614
+ [What are attention masks?](../glossary#attention-mask)
1615
+
1616
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1617
+ [`PreTrainedTokenizer.__call__`] for details.
1618
+
1619
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1620
+ `past_key_values`).
1621
+
1622
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1623
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1624
+ information on the default strategy.
1625
+
1626
+ - 1 indicates the head is **not masked**,
1627
+ - 0 indicates the head is **masked**.
1628
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1629
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1630
+ config.n_positions - 1]`.
1631
+
1632
+ [What are position IDs?](../glossary#position-ids)
1633
+ past_key_values (`Cache`, *optional*):
1634
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1635
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1636
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1637
+
1638
+ Has to be an instance of [`~cache_utils.Cache`] instance, see our
1639
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
1640
+
1641
+ The model will output the same cache type that is fed as input. If no `past_key_values` are passed, the
1642
+ legacy cache format will be returned.
1643
+
1644
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1645
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1646
+ of shape `(batch_size, sequence_length)`.
1647
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1648
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1649
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1650
+ model's internal embedding lookup matrix.
1651
+ use_cache (`bool`, *optional*):
1652
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1653
+ `past_key_values`).
1654
+ output_attentions (`bool`, *optional*):
1655
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1656
+ tensors for more detail.
1657
+ output_hidden_states (`bool`, *optional*):
1658
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1659
+ more detail.
1660
+ return_dict (`bool`, *optional*):
1661
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1662
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1663
+ Indices depicting the position of the input sequence tokens in the sequence. Contrary to `position_ids`,
1664
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1665
+ the complete sequence length.
1666
+ """
1667
+
1668
+
1669
+ class Emu3ForCausalLM(Emu3PreTrainedModel, GenerationMixin):
1670
+ _tied_weights_keys = ["lm_head.weight"]
1671
+ _tp_plan = {"lm_head": "colwise_rep"}
1672
+ config_class = Emu3TextConfig
1673
+
1674
+ def __init__(self, config):
1675
+ super().__init__(config)
1676
+ self.model = Emu3TextModel(config)
1677
+ self.vocab_size = config.vocab_size
1678
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1679
+
1680
+ # Initialize weights and apply final processing
1681
+ self.post_init()
1682
+
1683
+ def get_input_embeddings(self):
1684
+ return self.model.embed_tokens
1685
+
1686
+ def set_input_embeddings(self, value):
1687
+ self.model.embed_tokens = value
1688
+
1689
+ def get_output_embeddings(self):
1690
+ return self.lm_head
1691
+
1692
+ def set_output_embeddings(self, new_embeddings):
1693
+ self.lm_head = new_embeddings
1694
+
1695
+ def set_decoder(self, decoder):
1696
+ self.model = decoder
1697
+
1698
+ def get_decoder(self):
1699
+ return self.model
1700
+
1701
+ @add_start_docstrings_to_model_forward(EMU3_TEXT_INPUTS_DOCSTRING)
1702
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="Emu3TextConfig")
1703
+ def forward(
1704
+ self,
1705
+ input_ids: torch.LongTensor = None,
1706
+ attention_mask: Optional[torch.Tensor] = None,
1707
+ position_ids: Optional[torch.LongTensor] = None,
1708
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1709
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1710
+ labels: Optional[torch.LongTensor] = None,
1711
+ use_cache: Optional[bool] = None,
1712
+ output_attentions: Optional[bool] = None,
1713
+ output_hidden_states: Optional[bool] = None,
1714
+ return_dict: Optional[bool] = None,
1715
+ cache_position: Optional[torch.LongTensor] = None,
1716
+ num_logits_to_keep: int = 0,
1717
+ **kwargs: Unpack[KwargsForCausalLM],
1718
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1719
+ r"""
1720
+ Args:
1721
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1722
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1723
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1724
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1725
+ num_logits_to_keep (`int`, *optional*):
1726
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1727
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1728
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1729
+
1730
+ Returns:
1731
+
1732
+ Example:
1733
+
1734
+ ```python
1735
+ >>> from transformers import Emu3Processor, Emu3ForCausalLM
1736
+ >>> import torch
1737
+ >>> import requests
1738
+ >>> from PIL import Image
1739
+
1740
+ >>> model = Emu3ForCausalLM.from_pretrained("Emu3-community/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
1741
+ >>> processor = Emu3Processor.from_pretrained("Emu3-community/Emu3-Chat-hf")
1742
+
1743
+ >>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
1744
+
1745
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
1746
+ >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1747
+ ```"""
1748
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1749
+ output_hidden_states = (
1750
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1751
+ )
1752
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1753
+
1754
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1755
+ outputs = self.model(
1756
+ input_ids=input_ids,
1757
+ attention_mask=attention_mask,
1758
+ position_ids=position_ids,
1759
+ past_key_values=past_key_values,
1760
+ inputs_embeds=inputs_embeds,
1761
+ use_cache=use_cache,
1762
+ output_attentions=output_attentions,
1763
+ output_hidden_states=output_hidden_states,
1764
+ return_dict=return_dict,
1765
+ cache_position=cache_position,
1766
+ **kwargs,
1767
+ )
1768
+
1769
+ hidden_states = outputs[0]
1770
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1771
+ logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])
1772
+
1773
+ loss = None
1774
+ if labels is not None:
1775
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
1776
+
1777
+ if not return_dict:
1778
+ output = (logits,) + outputs[1:]
1779
+ return (loss,) + output if loss is not None else output
1780
+
1781
+ return CausalLMOutputWithPast(
1782
+ loss=loss,
1783
+ logits=logits,
1784
+ past_key_values=outputs.past_key_values,
1785
+ hidden_states=outputs.hidden_states,
1786
+ attentions=outputs.attentions,
1787
+ )
1788
+
1789
+
1790
+ class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
1791
+ def __init__(self, config):
1792
+ super().__init__(config)
1793
+ self.text_model = Emu3ForCausalLM._from_config(config.text_config)
1794
+ self.vqmodel = Emu3VQVAE(config.vq_config)
1795
+ self.vocabulary_mapping = Emu3ImageVocabularyMapping(config.vocabulary_map)
1796
+
1797
+ # Initialize weights and apply final processing
1798
+ self.post_init()
1799
+
1800
+ def get_input_embeddings(self):
1801
+ return self.text_model.get_input_embeddings()
1802
+
1803
+ def set_input_embeddings(self, value):
1804
+ self.text_model.set_input_embeddings(value)
1805
+
1806
+ def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):
1807
+ """
1808
+ Tokenizes images into discrete tokens with VQGAN module. Converts
1809
+ obtained image tokens into BPE tokens and wraps with "boi" and "eoi"
1810
+ special tokens.
1811
+
1812
+ Args:
1813
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1814
+ The tensors corresponding to the input images.
1815
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
1816
+ The sizes of the images in the batch, being (height, width) for each image.
1817
+ """
1818
+ image_tokens_list = self.vqmodel.encode(pixel_values, image_sizes)
1819
+ bpe_tokens_list = [self.vocabulary_mapping.convert_img2bpe(tokens).flatten() for tokens in image_tokens_list]
1820
+ bpe_tokens = torch.cat(bpe_tokens_list)
1821
+ return bpe_tokens
1822
+
1823
+ @torch.no_grad
1824
+ def decode_image_tokens(self, image_tokens: torch.LongTensor, height: int, width: int):
1825
+ """
1826
+ Decodes generated image tokens from language model to continuous pixel values
1827
+ with VQGAN module via upsampling.
1828
+
1829
+ Args:
1830
+ image_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):
1831
+ The tensors corresponding to the input images.
1832
+ height (`int`):
1833
+ Height of the generated image before upsampling.
1834
+ width (`int`):
1835
+ Width of the generated image before upsampling.
1836
+ """
1837
+ sequences = image_tokens[:, :-3].view(-1, height, width + 1)
1838
+ image_tokens = self.vocabulary_mapping.convert_bpe2img(sequences)
1839
+ image = self.vqmodel.decode(image_tokens)
1840
+ return image
1841
+
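+ # Illustrative note (not part of the upstream file): the slicing in `decode_image_tokens`
+ # assumes the language model emits each image row as `width` visual-token ids followed by
+ # one EOL id, plus three trailing special tokens; `[:, :-3]` drops those trailing tokens,
+ # `.view(-1, height, width + 1)` recovers the row layout, and `convert_bpe2img` then strips
+ # the EOL column before the VQ decoder runs. Per image that implies a token count of:
+ #
+ #   num_generated_tokens = height * (width + 1) + 3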
1842
+ @add_start_docstrings_to_model_forward(EMU3_INPUTS_DOCSTRING)
1843
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1844
+ def forward(
1845
+ self,
1846
+ input_ids: torch.LongTensor = None,
1847
+ pixel_values: torch.FloatTensor = None,
1848
+ image_sizes: torch.Tensor = None,
1849
+ attention_mask: Optional[torch.Tensor] = None,
1850
+ position_ids: Optional[torch.LongTensor] = None,
1851
+ past_key_values: Optional[Cache] = None,
1852
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1853
+ use_cache: Optional[bool] = None,
1854
+ output_attentions: Optional[bool] = None,
1855
+ output_hidden_states: Optional[bool] = None,
1856
+ return_dict: Optional[bool] = None,
1857
+ cache_position: Optional[torch.LongTensor] = None,
1858
+ labels: Optional[torch.LongTensor] = None,
1859
+ num_logits_to_keep: int = 0,
1860
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1861
+ r"""
1862
+ Args:
1863
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1864
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1865
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1866
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1867
+ num_logits_to_keep (`int`, *optional*):
1868
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1869
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1870
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1871
+
1872
+ Returns:
1873
+
1874
+ Example:
1875
+
1876
+ ```python
1877
+ >>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
1878
+ >>> import torch
1879
+ >>> import requests
1880
+ >>> from PIL import Image
1881
+
1882
+ >>> model = Emu3ForConditionalGeneration.from_pretrained("Emu3-community/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
1883
+ >>> processor = Emu3Processor.from_pretrained("Emu3-community/Emu3-Chat-hf")
1884
+
1885
+ >>> conversation = [
1886
+ ... {
1887
+ ... "role": "system",
1888
+ ... "content": [
1889
+ ... {"type": "text", "text": "You are a helpful assistant."},
1890
+ ... ],
1891
+ ... },
1892
+ ... {
1893
+ ... "role": "user",
1894
+ ... "content": [
1895
+ ... {"type": "image"},
1896
+ ... {"type": "text", "text": "Please describe the image."},
1897
+ ... ],
1898
+ ... },
1899
+ ... ]
1900
+
1901
+ >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
1902
+ >>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
1903
+
1904
+ >>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
1905
+
1906
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
1907
+ >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1908
+ ```"""
1909
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1910
+ output_hidden_states = (
1911
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1912
+ )
1913
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1914
+
1915
+ if (input_ids is None) ^ (inputs_embeds is not None):
1916
+ raise ValueError(
1917
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1918
+ )
1919
+
1920
+ if pixel_values is not None and inputs_embeds is not None:
1921
+ raise ValueError(
1922
+ "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
1923
+ )
1924
+
1925
+ if pixel_values is not None:
1926
+ image_tokens = self.get_image_tokens(pixel_values, image_sizes)
1927
+ special_image_mask = input_ids == self.vocabulary_mapping.image_token_id
1928
+ image_tokens = image_tokens.to(input_ids.device, input_ids.dtype)
1929
+ input_ids = input_ids.masked_scatter(special_image_mask, image_tokens)
1930
+
1931
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1932
+ outputs = self.text_model(
1933
+ input_ids=input_ids,
1934
+ attention_mask=attention_mask,
1935
+ position_ids=position_ids,
1936
+ past_key_values=past_key_values,
1937
+ inputs_embeds=inputs_embeds,
1938
+ use_cache=use_cache,
1939
+ output_attentions=output_attentions,
1940
+ output_hidden_states=output_hidden_states,
1941
+ return_dict=return_dict,
1942
+ cache_position=cache_position,
1943
+ num_logits_to_keep=num_logits_to_keep,
1944
+ )
1945
+
1946
+ return outputs
1947
+
1948
+
1949
+ __all__ = ["Emu3ForConditionalGeneration", "Emu3ForCausalLM", "Emu3TextModel", "Emu3PreTrainedModel", "Emu3VQVAE"]
janus/lib/python3.10/site-packages/transformers/models/emu3/modular_emu3.py ADDED
@@ -0,0 +1,1270 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import math
18
+ from functools import cached_property
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.nn as nn
23
+ import torch.nn.functional as F
24
+ import torch.utils.checkpoint
25
+
26
+ from ...cache_utils import Cache
27
+ from ...generation import GenerationMixin
28
+ from ...modeling_outputs import (
29
+ CausalLMOutputWithPast,
30
+ )
31
+ from ...modeling_utils import PreTrainedModel
32
+ from ...utils import (
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ is_flash_attn_2_available,
36
+ logging,
37
+ replace_return_docstrings,
38
+ )
39
+ from ..chameleon.modeling_chameleon import (
40
+ ChameleonPreTrainedModel,
41
+ ChameleonVQVAEEncoderConvDownsample,
42
+ )
43
+ from ..llama.modeling_llama import (
44
+ LlamaDecoderLayer,
45
+ LlamaForCausalLM,
46
+ LlamaModel,
47
+ )
48
+ from ..siglip.modeling_siglip import SiglipAttention
49
+ from .configuration_emu3 import Emu3Config, Emu3TextConfig, Emu3VQVAEConfig
50
+
51
+
52
+ if is_flash_attn_2_available():
53
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
54
+
55
+
56
+ _CONFIG_FOR_DOC = "Emu3Config"
57
+ _CHECKPOINT_FOR_DOC = "Emu3-community/Emu3-Chat-hf"
58
+
59
+ logger = logging.get_logger(__name__)
60
+
61
+
62
+ # Has extra dropout which no other model in the library has
63
+ class Emu3DecoderLayer(LlamaDecoderLayer):
64
+ def __init__(self, config: Emu3Config, layer_idx: int):
65
+ super().__init__(config, layer_idx)
66
+ self.dropout = nn.Dropout(config.attention_dropout)
67
+
68
+ def forward(
69
+ self,
70
+ hidden_states: torch.Tensor,
71
+ attention_mask: Optional[torch.Tensor] = None,
72
+ position_ids: Optional[torch.LongTensor] = None,
73
+ past_key_value: Optional[Cache] = None,
74
+ output_attentions: Optional[bool] = False,
75
+ use_cache: Optional[bool] = False,
76
+ cache_position: Optional[torch.LongTensor] = None,
77
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
78
+ **kwargs,
79
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
80
+ """
81
+ Args:
82
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
83
+ attention_mask (`torch.FloatTensor`, *optional*):
84
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
85
+ query_sequence_length, key_sequence_length)` if default attention is used.
86
+ output_attentions (`bool`, *optional*):
87
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
88
+ returned tensors for more detail.
89
+ use_cache (`bool`, *optional*):
90
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
91
+ (see `past_key_values`).
92
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
93
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
94
+ Indices depicting the position of the input sequence tokens in the sequence
95
+ kwargs (`dict`, *optional*):
96
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
97
+ into the model
98
+ """
99
+ residual = hidden_states
100
+
101
+ hidden_states = self.input_layernorm(hidden_states)
102
+
103
+ # Self Attention
104
+ hidden_states, self_attn_weights = self.self_attn(
105
+ hidden_states=hidden_states,
106
+ attention_mask=attention_mask,
107
+ position_ids=position_ids,
108
+ past_key_value=past_key_value,
109
+ output_attentions=output_attentions,
110
+ use_cache=use_cache,
111
+ cache_position=cache_position,
112
+ position_embeddings=position_embeddings,
113
+ **kwargs,
114
+ )
115
+ hidden_states = residual + self.dropout(hidden_states)
116
+
117
+ # Fully Connected
118
+ residual = hidden_states
119
+ hidden_states = self.post_attention_layernorm(hidden_states)
120
+ hidden_states = self.mlp(hidden_states)
121
+ hidden_states = residual + self.dropout(hidden_states)
122
+
123
+ outputs = (hidden_states,)
124
+
125
+ if output_attentions:
126
+ outputs += (self_attn_weights,)
127
+
128
+ return outputs
129
+
130
+
131
+ class Emu3VQVAEVectorQuantizer(nn.Module):
132
+ """
133
+ A module for vector quantization using learned embedding vectors.
134
+
135
+ This module implements the quantization process similar to the one described in
136
+ the VQ-VAE (Vector Quantized Variational AutoEncoder) paper. It quantizes continuous
137
+ input vectors into discrete codebook vectors, which are learned during training.
138
+ Current implementation improves over previous ones by avoiding costly matrix multiplications
139
+ and allowing for post-hoc remapping of indices.
140
+ """
141
+
142
+ def __init__(self, config: Emu3VQVAEConfig):
143
+ super().__init__()
144
+ self.embedding = nn.Embedding(config.codebook_size, config.embed_dim)
145
+ self.embedding.weight.data.uniform_(-1.0 / config.codebook_size, 1.0 / config.codebook_size)
146
+
147
+ def forward(self, hidden_state: torch.Tensor):
148
+ batch_size, temporal, channels, height, width = hidden_state.shape
149
+ hidden_state = hidden_state.permute(0, 1, 3, 4, 2).contiguous()
150
+ hidden_state_flattened = hidden_state.view(-1, channels)
151
+
152
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
153
+ hidden_state_sum = torch.sum(hidden_state_flattened**2, dim=1, keepdim=True)
154
+ embedding_sum = torch.sum(self.embedding.weight**2, dim=1)
155
+
156
+ # "bd,dn->bn",
157
+ distances = 2 * torch.matmul(hidden_state_flattened, self.embedding.weight.transpose(0, 1))
158
+ distances = hidden_state_sum + embedding_sum - distances
159
+
160
+ min_encoding_indices = torch.argmin(distances, dim=1)
161
+ min_encoding_indices = min_encoding_indices.view(batch_size, temporal, height, width)
162
+ return min_encoding_indices
163
+
164
+
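+ # Illustrative sketch (not part of the upstream file): the nearest-codebook lookup above
+ # uses the expansion ||z - e||^2 = ||z||^2 + ||e||^2 - 2 * z.e, so the argmin over the
+ # codebook can be computed with a single matmul instead of materializing all pairwise
+ # differences. Shapes below are assumptions for illustration only:
+ #
+ #   flat = hidden_state.permute(0, 1, 3, 4, 2).reshape(-1, embed_dim)   # (B*T*H*W, D)
+ #   dist = flat.pow(2).sum(1, keepdim=True) + codebook.pow(2).sum(1) - 2 * flat @ codebook.T
+ #   indices = dist.argmin(dim=1)                                        # one code id per vector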
165
+ class Emu3VQVAEEncoderConvDownsample(ChameleonVQVAEEncoderConvDownsample):
166
+ pass
167
+
168
+
169
+ class Emu3VQVAEEncoderConvUpsample(nn.Module):
170
+ def __init__(self, in_channels):
171
+ super().__init__()
172
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
173
+
174
+ def forward(self, hidden_states):
175
+ hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
176
+ hidden_states = self.conv(hidden_states)
177
+ return hidden_states
178
+
179
+
180
+ class Emu3VQVAEConv3d(nn.Module):
181
+ def __init__(
182
+ self,
183
+ in_channel: int,
184
+ out_channel: int,
185
+ kernel_size: Tuple[int],
186
+ stride: Tuple[int],
187
+ ):
188
+ super().__init__()
189
+
190
+ padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
191
+ self.padding = ()
192
+ for pad_size in padding_sizes[::-1]:
193
+ self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
194
+ self.padding += (2, 0)
195
+
196
+ self.conv = nn.Conv3d(
197
+ in_channel,
198
+ out_channel,
199
+ kernel_size,
200
+ stride=stride,
201
+ )
202
+
203
+ def forward(self, hidden_states: torch.Tensor):
204
+ hidden_states = F.pad(hidden_states, self.padding)
205
+ hidden_states = self.conv(hidden_states)
206
+ return hidden_states
207
+
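+ # Illustrative note (not part of the upstream file): `self.padding` is assembled in
+ # F.pad order (last dimension first). Height and width get roughly symmetric padding of
+ # kernel - stride, while the temporal dimension always gets (2, 0), i.e. padding only on
+ # the "past" side. For example, with kernel_size=(4, 3, 3) and stride=(2, 1, 1) the tuple
+ # works out to (1, 1, 1, 1, 2, 0): pad W by (1, 1), H by (1, 1), T by (2, 0).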
208
+
209
+ class Emu3VQVAESpatialNorm(nn.Module):
210
+ def __init__(
211
+ self,
212
+ in_channels: int,
213
+ out_channels: int,
214
+ ):
215
+ super().__init__()
216
+ self.norm_layer = nn.GroupNorm(
217
+ num_channels=out_channels,
218
+ num_groups=32,
219
+ eps=1e-6,
220
+ affine=True,
221
+ )
222
+
223
+ self.conv_y = nn.Conv2d(
224
+ in_channels,
225
+ out_channels,
226
+ kernel_size=1,
227
+ stride=1,
228
+ padding=0,
229
+ )
230
+ self.conv_b = nn.Conv2d(
231
+ in_channels,
232
+ out_channels,
233
+ kernel_size=1,
234
+ stride=1,
235
+ padding=0,
236
+ )
237
+
238
+ def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
239
+ quant_states = F.interpolate(quant_states, size=hidden_states.shape[-2:], mode="nearest")
240
+ hidden_states = self.norm_layer(hidden_states)
241
+ hidden_states = hidden_states * self.conv_y(quant_states) + self.conv_b(quant_states)
242
+ return hidden_states
243
+
244
+
245
+ class Emu3VQVAETemporalUpsample(nn.Module):
246
+ def __init__(
247
+ self,
248
+ in_channel: int,
249
+ out_channel: int,
250
+ ):
251
+ super().__init__()
252
+ self.conv = Emu3VQVAEConv3d(
253
+ in_channel,
254
+ out_channel,
255
+ kernel_size=(3, 3, 3),
256
+ stride=(1, 1, 1),
257
+ )
258
+
259
+ def forward(self, hidden_states: torch.Tensor):
260
+ batch_size, channels, temporal, height, width = hidden_states.shape
261
+ hidden_states = hidden_states.permute(0, 1, 3, 4, 2).contiguous().view(batch_size, -1, temporal)
262
+ hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode="nearest")
263
+ hidden_states = hidden_states.view(batch_size, channels, height, width, -1).permute(0, 1, 4, 2, 3).contiguous()
264
+ hidden_states = self.conv(hidden_states)
265
+ return hidden_states
266
+
267
+
268
+ class Emu3VQVAETemporalDownsample(nn.Module):
269
+ def __init__(
270
+ self,
271
+ in_channel: int,
272
+ out_channel: int,
273
+ ):
274
+ super().__init__()
275
+ self.conv = Emu3VQVAEConv3d(
276
+ in_channel,
277
+ out_channel,
278
+ kernel_size=(4, 3, 3),
279
+ stride=(2, 1, 1),
280
+ )
281
+
282
+ def forward(self, hidden_states: torch.Tensor):
283
+ hidden_states = self.conv(hidden_states)
284
+ return hidden_states
285
+
286
+
287
+ class Emu3VQVAETemporalResnetBlock(nn.Module):
288
+ def __init__(
289
+ self,
290
+ in_channels,
291
+ out_channels=None,
292
+ ):
293
+ super().__init__()
294
+ self.in_channels = in_channels
295
+ self.out_channels = in_channels if out_channels is None else out_channels
296
+
297
+ self.norm1 = nn.BatchNorm3d(in_channels)
298
+ self.conv1 = Emu3VQVAEConv3d(
299
+ in_channels,
300
+ out_channels,
301
+ kernel_size=(3, 3, 3),
302
+ stride=(1, 1, 1),
303
+ )
304
+ self.norm2 = nn.BatchNorm3d(out_channels)
305
+ self.conv2 = Emu3VQVAEConv3d(
306
+ out_channels,
307
+ out_channels,
308
+ kernel_size=(3, 3, 3),
309
+ stride=(1, 1, 1),
310
+ )
311
+ if self.in_channels != self.out_channels:
312
+ self.nin_shortcut = nn.Conv3d(
313
+ in_channels,
314
+ out_channels,
315
+ kernel_size=1,
316
+ stride=1,
317
+ padding=0,
318
+ )
319
+
320
+ def forward(self, hidden_states):
321
+ residual = hidden_states
322
+ hidden_states = self.norm1(hidden_states)
323
+ hidden_states *= torch.sigmoid(hidden_states)
324
+ hidden_states = self.conv1(hidden_states)
325
+
326
+ hidden_states = self.norm2(hidden_states)
327
+ hidden_states *= torch.sigmoid(hidden_states)
328
+ hidden_states = self.conv2(hidden_states)
329
+
330
+ if self.in_channels != self.out_channels:
331
+ residual = self.nin_shortcut(residual)
332
+
333
+ return residual + hidden_states
334
+
335
+
336
+ class Emu3VQVAEResnetBlock(nn.Module):
337
+ def __init__(
338
+ self,
339
+ in_channels: int,
340
+ out_channels: Optional[int] = None,
341
+ quant_channels: Optional[int] = None,
342
+ ):
343
+ super().__init__()
344
+ self.in_channels = in_channels
345
+ out_channels = in_channels if out_channels is None else out_channels
346
+ self.out_channels = out_channels
347
+ self.quant_channels = quant_channels
348
+
349
+ if quant_channels is None:
350
+ self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
351
+ self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-6, affine=True)
352
+ else:
353
+ self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
354
+ self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
355
+
356
+ self.conv1 = nn.Conv2d(
357
+ in_channels,
358
+ out_channels,
359
+ kernel_size=3,
360
+ stride=1,
361
+ padding=1,
362
+ )
363
+
364
+ self.conv2 = nn.Conv2d(
365
+ out_channels,
366
+ out_channels,
367
+ kernel_size=3,
368
+ stride=1,
369
+ padding=1,
370
+ )
371
+
372
+ if self.in_channels != self.out_channels:
373
+ self.nin_shortcut = nn.Conv2d(
374
+ in_channels,
375
+ out_channels,
376
+ kernel_size=1,
377
+ stride=1,
378
+ padding=0,
379
+ )
380
+
381
+ def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor] = None):
382
+ norm_args = () if self.quant_channels is None else (quant_channels,)
383
+
384
+ residual = hidden_states
385
+ hidden_states = self.norm1(hidden_states, *norm_args)
386
+ hidden_states *= torch.sigmoid(hidden_states)
387
+ hidden_states = self.conv1(hidden_states)
388
+
389
+ hidden_states = self.norm2(hidden_states, *norm_args)
390
+ hidden_states *= torch.sigmoid(hidden_states)
391
+ hidden_states = self.conv2(hidden_states)
392
+
393
+ if self.in_channels != self.out_channels:
394
+ residual = self.nin_shortcut(residual)
395
+
396
+ return residual + hidden_states
397
+
398
+
399
+ class Emu3VQVAEAttentionBlock(SiglipAttention):
400
+ pass
401
+
402
+
403
+ class Emu3VQVAEGroupNorm(nn.GroupNorm):
404
+ """
405
+ Same as the torch GroupNorm with the only difference that this one accepts
406
+ an optional kwarg `quant_states` which is not used. This class makes it easier to
407
+ use SpatialNorm or GroupNorm without conditionals
408
+ """
409
+
410
+ def __init__(self, **kwargs):
411
+ super().__init__(**kwargs)
412
+
413
+ def forward(self, input, quant_states=None):
414
+ return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
415
+
416
+
417
+ class Emu3VQVAEMiddleBlock(nn.Module):
418
+ def __init__(self, config, in_channels, quant_channels=None):
419
+ super().__init__()
420
+
421
+ self.block_1 = Emu3VQVAEResnetBlock(
422
+ in_channels=in_channels,
423
+ out_channels=in_channels,
424
+ quant_channels=quant_channels,
425
+ )
426
+ self.attn_1 = Emu3VQVAEAttentionBlock(config)
427
+ if quant_channels is None:
428
+ self.attn_norm = Emu3VQVAEGroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
429
+ else:
430
+ self.attn_norm = Emu3VQVAESpatialNorm(quant_channels, in_channels)
431
+
432
+ self.block_2 = Emu3VQVAEResnetBlock(
433
+ in_channels=in_channels,
434
+ out_channels=in_channels,
435
+ quant_channels=quant_channels,
436
+ )
437
+
438
+ def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor = None):
439
+ hidden_states = self.block_1(hidden_states, quant_states)
440
+ residual = hidden_states
441
+ hidden_states = self.attn_norm(hidden_states, quant_states)
442
+ batch_size, channels, height, width = hidden_states.shape
443
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
444
+ hidden_states = self.attn_1(hidden_states)[0]
445
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
446
+ hidden_states = residual + hidden_states
447
+ hidden_states = self.block_2(hidden_states, quant_states)
448
+ return hidden_states
449
+
450
+
451
+ class Emu3VQVAEDownBlock(nn.Module):
452
+ def __init__(self, config):
453
+ super().__init__()
454
+
455
+ self.num_resolutions = len(config.channel_multiplier)
456
+ self.num_res_blocks = config.num_res_blocks
457
+ base_channels = config.base_channels
458
+ channel_multiplier = config.channel_multiplier
459
+
460
+ in_channel_multiplier = (1,) + tuple(channel_multiplier)
461
+ self.in_channel_multiplier = in_channel_multiplier
462
+ self.down = nn.ModuleList()
463
+ for i_level in range(self.num_resolutions):
464
+ block = nn.ModuleList()
465
+ attn = nn.ModuleList()
466
+ attn_norms = nn.ModuleList()
467
+ block_in = base_channels * in_channel_multiplier[i_level]
468
+ block_out = base_channels * channel_multiplier[i_level]
469
+ for i_block in range(self.num_res_blocks):
470
+ block.append(
471
+ Emu3VQVAEResnetBlock(
472
+ in_channels=block_in,
473
+ out_channels=block_out,
474
+ )
475
+ )
476
+ block_in = block_out
477
+ if config.attn_resolutions is not None and i_level in config.attn_resolutions:
478
+ attn.append(Emu3VQVAEAttentionBlock(config))
479
+ attn_norms.append(nn.GroupNorm(num_channels=block_in, num_groups=32, eps=1e-6, affine=True))
480
+
481
+ down = nn.Module()
482
+ down.block = block
483
+ down.attn = attn
484
+ down.attn_norms = attn_norms
485
+ if i_level != self.num_resolutions - 1:
486
+ down.downsample = Emu3VQVAEEncoderConvDownsample(block_in)
487
+ self.down.append(down)
488
+
489
+ def forward(self, hidden_states: torch.FloatTensor):
490
+ for i_level, blocks in enumerate(self.down):
491
+ for i_block in range(self.num_res_blocks):
492
+ hidden_states = blocks.block[i_block](hidden_states)
493
+ if len(blocks.attn) > 0:
494
+ residual = hidden_states
495
+ hidden_states = blocks.attn_norms[i_block](hidden_states)
496
+
497
+ batch_size, channels, height, width = hidden_states.shape
498
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
499
+ hidden_states = blocks.attn[i_block](hidden_states)[0]
500
+
501
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
502
+ hidden_states = residual + hidden_states
503
+
504
+ if i_level != self.num_resolutions - 1:
505
+ hidden_states = blocks.downsample(hidden_states)
506
+
507
+ return hidden_states
508
+
509
+
510
+ class Emu3VQVAEUpBlock(nn.Module):
511
+ def __init__(self, config):
512
+ super().__init__()
513
+
514
+ self.num_resolutions = len(config.channel_multiplier)
515
+ self.num_res_blocks = config.num_res_blocks
516
+
517
+ quant_channels = config.embed_dim
518
+ block_in = config.base_channels * config.channel_multiplier[-1]
519
+
520
+ self.up = nn.ModuleList()
521
+ for i_level in reversed(range(self.num_resolutions)):
522
+ block = nn.ModuleList()
523
+ attn = nn.ModuleList()
524
+ attn_norms = nn.ModuleList()
525
+ block_out = config.base_channels * config.channel_multiplier[i_level]
526
+ for i_block in range(self.num_res_blocks + 1):
527
+ block.append(
528
+ Emu3VQVAEResnetBlock(
529
+ in_channels=block_in,
530
+ out_channels=block_out,
531
+ quant_channels=quant_channels,
532
+ )
533
+ )
534
+ block_in = block_out
535
+ if i_level in config.attn_resolutions:
536
+ attn.append(Emu3VQVAEAttentionBlock(config))
537
+ attn_norms.append(Emu3VQVAESpatialNorm(quant_channels, block_in))
538
+
539
+ up = nn.Module()
540
+ up.block = block
541
+ up.attn = attn
542
+ up.attn_norms = attn_norms
543
+ if i_level != 0:
544
+ up.upsample = Emu3VQVAEEncoderConvUpsample(block_in)
545
+
546
+ self.up.insert(0, up)
547
+
548
+ def forward(self, hidden_states: torch.FloatTensor, quant_states: torch.FloatTensor):
549
+ for i_level, blocks in enumerate(self.up[::-1]):
550
+ for i_block in range(self.num_res_blocks + 1):
551
+ hidden_states = blocks.block[i_block](hidden_states, quant_states)
552
+ if len(blocks.attn) > 0:
553
+ residual = hidden_states
554
+ hidden_states = blocks.attn_norms[i_block](hidden_states, quant_states)
555
+
556
+ batch_size, channels, height, width = hidden_states.shape
557
+ hidden_states = hidden_states.view(batch_size, channels, height * width).transpose(1, 2)
558
+ hidden_states = blocks.attn[i_block](hidden_states)[0]
559
+
560
+ hidden_states = hidden_states.reshape(batch_size, height, width, channels).permute(0, 3, 1, 2)
561
+ hidden_states = residual + hidden_states
562
+ if i_level != len(self.up) - 1:
563
+ hidden_states = blocks.upsample(hidden_states)
564
+
565
+ return hidden_states
566
+
567
+
568
+ class Emu3VQVAEEncoder(nn.Module):
569
+ def __init__(self, config):
570
+ super().__init__()
571
+
572
+ base_channels = config.base_channels
573
+ in_channels = config.in_channels
574
+ double_latent = config.double_latent
575
+ latent_channels = config.latent_channels
576
+ channel_multiplier = config.channel_multiplier
577
+ out_channels = 2 * latent_channels if double_latent else latent_channels
578
+ block_in = base_channels * channel_multiplier[-1]
579
+
580
+ self.conv_in = torch.nn.Conv2d(in_channels, base_channels, kernel_size=3, stride=1, padding=1)
581
+ self.down_block = Emu3VQVAEDownBlock(config)
582
+ self.middle_block = Emu3VQVAEMiddleBlock(config, block_in)
583
+
584
+ self.norm_out = torch.nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
585
+ self.conv_out = torch.nn.Conv2d(
586
+ block_in,
587
+ out_channels,
588
+ kernel_size=3,
589
+ stride=1,
590
+ padding=1,
591
+ )
592
+
593
+ temporal_down_blocks = int(math.log2(config.temporal_downsample_factor))
594
+ self.time_conv = nn.ModuleList()
595
+ self.time_res_stack = nn.ModuleList()
596
+
597
+ for i in range(temporal_down_blocks):
598
+ conv = Emu3VQVAETemporalDownsample(out_channels, out_channels)
599
+ self.time_conv.append(conv)
600
+
601
+ for _ in range(config.num_res_blocks):
602
+ time_res_conv = Emu3VQVAETemporalResnetBlock(
603
+ in_channels=out_channels,
604
+ out_channels=out_channels,
605
+ )
606
+ self.time_res_stack.append(time_res_conv)
607
+
608
+ def forward(self, pixel_values: torch.LongTensor):
609
+ temporal_dim = pixel_values.shape[1]
610
+ pixel_values = pixel_values.reshape(-1, *pixel_values.shape[2:])
611
+
612
+ # downsampling & middle
613
+ hidden_states = self.conv_in(pixel_values)
614
+ hidden_states = self.down_block(hidden_states)
615
+ hidden_states = self.middle_block(hidden_states)
616
+
617
+ # end
618
+ hidden_states = self.norm_out(hidden_states)
619
+ hidden_states *= torch.sigmoid(hidden_states)
620
+ hidden_states = self.conv_out(hidden_states)
621
+
622
+ hidden_states = hidden_states.reshape(-1, temporal_dim, *hidden_states.shape[1:])
623
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
624
+
625
+ # temporal convs
626
+ for conv in self.time_conv:
627
+ hidden_states = conv(hidden_states)
628
+ hidden_states *= torch.sigmoid(hidden_states)
629
+
630
+ for layer in self.time_res_stack:
631
+ hidden_states = layer(hidden_states)
632
+
633
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
634
+
635
+ return hidden_states
636
+
637
+
638
+ class Emu3VQVAEDecoder(nn.Module):
639
+ def __init__(self, config: Emu3VQVAEConfig):
640
+ super().__init__()
641
+
642
+ quant_channels = config.embed_dim
643
+ block_in = config.base_channels * config.channel_multiplier[-1]
644
+ self.time_res_stack = nn.ModuleList()
645
+ for _ in range(config.num_res_blocks):
646
+ time_res_conv = Emu3VQVAETemporalResnetBlock(
647
+ in_channels=config.latent_channels, out_channels=config.latent_channels
648
+ )
649
+ self.time_res_stack.append(time_res_conv)
650
+
651
+ temp_upsample_block_num = int(math.log2(config.temporal_downsample_factor))
652
+ self.time_conv = nn.ModuleList()
653
+ for i in range(temp_upsample_block_num):
654
+ conv = Emu3VQVAETemporalUpsample(config.latent_channels, config.latent_channels)
655
+ self.time_conv.append(conv)
656
+
657
+ self.conv_in = nn.Conv2d(
658
+ config.latent_channels,
659
+ block_in,
660
+ kernel_size=3,
661
+ stride=1,
662
+ padding=1,
663
+ )
664
+
665
+ self.middle_block = Emu3VQVAEMiddleBlock(config, block_in, quant_channels=quant_channels)
666
+ self.up_block = Emu3VQVAEUpBlock(config)
667
+
668
+ block_in = config.base_channels * config.channel_multiplier[0]
669
+ self.norm_out = Emu3VQVAESpatialNorm(quant_channels, block_in)
670
+ self.conv_out = nn.Conv2d(
671
+ block_in,
672
+ config.out_channels,
673
+ kernel_size=3,
674
+ stride=1,
675
+ padding=1,
676
+ )
677
+
678
+ def forward(self, hidden_states: torch.Tensor, quant_states: torch.Tensor):
679
+ hidden_quant_states = torch.cat((hidden_states, quant_states), dim=0)
680
+ hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
681
+
682
+ # temporal convs
683
+ for layer in self.time_res_stack:
684
+ hidden_quant_states = layer(hidden_quant_states)
685
+
686
+ for layer in self.time_conv:
687
+ hidden_quant_states = layer(hidden_quant_states)
688
+ hidden_quant_states *= torch.sigmoid(hidden_quant_states)
689
+
690
+ hidden_quant_states = hidden_quant_states.permute(0, 2, 1, 3, 4)
691
+ hidden_states, quant_states = torch.chunk(hidden_quant_states, 2, dim=0)
692
+ hidden_states = hidden_states.reshape(-1, *hidden_states.shape[2:])
693
+ quant_states = quant_states.reshape(-1, *quant_states.shape[2:])
694
+
695
+ hidden_states = self.conv_in(hidden_states)
696
+
697
+ # middle & upsampling
698
+ hidden_states = self.middle_block(hidden_states, quant_states)
699
+ hidden_states = self.up_block(hidden_states, quant_states)
700
+
701
+ hidden_states = self.norm_out(hidden_states, quant_states)
702
+ hidden_states *= torch.sigmoid(hidden_states)
703
+ hidden_states = self.conv_out(hidden_states)
704
+
705
+ return hidden_states
706
+
707
+
708
+ EMU3_VQ_START_DOCSTRING = r"""
709
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
710
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
711
+ etc.)
712
+
713
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
714
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
715
+ and behavior.
716
+
717
+ Parameters:
718
+ config ([`Emu3VQVAEConfig`]):
719
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
720
+ load the weights associated with the model, only the configuration. Check out the
721
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
722
+ """
723
+
724
+
725
+ @add_start_docstrings(
726
+ """The VQ-VAE model used in Emu3 for encoding/decoding images into discrete tokens.
727
+ This model follows the "Make-a-scene: Scene-based text-to-image generation with human priors" paper from
728
+ [Oran Gafni, Adam Polyak, Oron Ashual, Shelly Sheynin, Devi Parikh, and Yaniv Taigman](https://arxiv.org/abs/2203.13131).
729
+ """,
730
+ EMU3_VQ_START_DOCSTRING,
731
+ )
732
+ class Emu3VQVAE(PreTrainedModel):
733
+ config_class = Emu3VQVAEConfig
734
+ base_model_prefix = "emuvideovq"
735
+ main_input_name = "pixel_values"
736
+ _no_split_modules = [
737
+ "Emu3VQVAETemporalResnetBlock",
738
+ "Emu3VQVAEAttentionBlock",
739
+ "Emu3VQVAEResnetBlock",
740
+ "Emu3VQVAEVectorQuantizer",
741
+ ]
742
+
743
+ def _init_weights(self, module):
744
+ if isinstance(module, (nn.Conv2d, nn.Conv3d)):
745
+ nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
746
+ elif isinstance(module, nn.Linear):
747
+ nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
748
+ if module.bias is not None:
749
+ fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
750
+ bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
751
+ nn.init.uniform_(module.bias, -bound, bound)
752
+ elif isinstance(module, (nn.BatchNorm2d, nn.BatchNorm3d, nn.GroupNorm)):
753
+ nn.init.constant_(module.weight, 1)
754
+ nn.init.constant_(module.bias, 0)
755
+
756
+ def __init__(self, config: Emu3VQVAEConfig):
757
+ super().__init__(config)
758
+
759
+ self.config = config
760
+
761
+ self.encoder = Emu3VQVAEEncoder(config)
762
+ self.decoder = Emu3VQVAEDecoder(config)
763
+ self.quantize = Emu3VQVAEVectorQuantizer(config)
764
+ self.vision_spatial_factor = 2 ** (len(config.channel_multiplier) - 1)
765
+
766
+ self.quant_conv = Emu3VQVAEConv3d(
767
+ config.latent_channels, config.embed_dim, kernel_size=(3, 1, 1), stride=(1, 1, 1)
768
+ )
769
+ self.post_quant_conv = Emu3VQVAEConv3d(
770
+ config.embed_dim, config.latent_channels, kernel_size=(3, 1, 1), stride=(1, 1, 1)
771
+ )
772
+ self.spatial_scale_factor = 2 ** (len(config.channel_multiplier) - 1)
773
+ self.eval() # Emu3's VQ model is frozen
774
+
775
+ self.post_init()
776
+
777
+ def encode(self, pixel_values: torch.Tensor, image_sizes: torch.Tensor):
778
+ is_image = pixel_values.ndim == 4
779
+ if is_image:
780
+ temporal = self.config.temporal_downsample_factor
781
+ batch_size, channels, height, width = pixel_values.shape
782
+ pixel_values = pixel_values.unsqueeze(1).repeat(1, temporal, 1, 1, 1)
783
+ else:
784
+ batch_size, temporal, channels, height, width = pixel_values.shape
785
+
786
+ hidden_states = self.encoder(pixel_values)
787
+
788
+ # b t c h w -> b c t h w
789
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
790
+ hidden_states = self.quant_conv(hidden_states)
791
+
792
+ # b c t h w -> b t c h w
793
+ hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
794
+ codes = self.quantize(hidden_states)
795
+
796
+ image_tokens = codes.squeeze(1) if is_image else codes
797
+
798
+ image_tokens = [
799
+ single_image[: int(size[0] / self.vision_spatial_factor), : int(size[1] / self.vision_spatial_factor)]
800
+ for single_image, size in zip(image_tokens, image_sizes)
801
+ ]
802
+
803
+ return image_tokens
804
+
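+ # Illustrative note (not part of the upstream file): still images are repeated along the
+ # temporal axis to `temporal_downsample_factor` frames so the temporal convolutions see a
+ # short clip, and each returned code grid is cropped to the true image size divided by
+ # `vision_spatial_factor` (= 2 ** (len(channel_multiplier) - 1)). As an assumed example,
+ # with a 4-entry `channel_multiplier` the factor is 8, so a 512 x 512 input yields a
+ # 64 x 64 grid of code ids.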
805
+ def decode(self, hidden_states: torch.Tensor):
806
+ is_image = hidden_states.ndim == 3
807
+ if is_image:
808
+ hidden_states = hidden_states.unsqueeze(1)
809
+
810
+ batch_size, temporal, height, width = hidden_states.shape
811
+ quant = self.quantize.embedding(hidden_states.flatten())
812
+
813
+ channels = quant.shape[-1]
814
+ quant = quant.view(batch_size, temporal, height, width, channels).permute(0, 4, 1, 2, 3).contiguous()
815
+ post_quant = self.post_quant_conv(quant)
816
+
817
+ quant = quant.permute(0, 2, 1, 3, 4)
818
+ post_quant = post_quant.permute(0, 2, 1, 3, 4)
819
+
820
+ video = self.decoder(post_quant, quant)
821
+ video = video.reshape(
822
+ batch_size,
823
+ temporal * self.config.temporal_downsample_factor,
824
+ self.config.out_channels,
825
+ height * self.spatial_scale_factor,
826
+ width * self.spatial_scale_factor,
827
+ )
828
+ return video[:, 0] if is_image else video
829
+
830
+
831
+ class Emu3ImageVocabularyMapping:
832
+ """
833
+ A class for mapping discrete image tokens from VQGAN to BPE tokens.
834
+ """
835
+
836
+ def __init__(self, vocab_map):
837
+ self.vocab_map = vocab_map
838
+ self.eol_token_id = vocab_map.get("<|extra_200|>")
839
+ self.image_token_id = vocab_map.get("<image>")
840
+
841
+ @cached_property
842
+ def image_tokens(self):
843
+ return sorted([val for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
844
+
845
+ @cached_property
846
+ def image_tokens_str(self):
847
+ return sorted([name for name, val in self.vocab_map.items() if name.startswith("<|visual token")])
848
+
849
+ @cached_property
850
+ def img2bpe(self):
851
+ return {int(token[-8:-2]): self.vocab_map[token] for token in self.image_tokens_str}
852
+
853
+ @cached_property
854
+ def bpe2img(self):
855
+ return {v: k for k, v in self.img2bpe.items()}
856
+
857
+ @cached_property
858
+ def bpe2img_mapping_tensor(self):
859
+ mapping = torch.zeros(max(self.bpe2img.keys()) + 1, dtype=torch.int)
860
+ for k, v in self.bpe2img.items():
861
+ mapping[k] = v
862
+ return mapping
863
+
864
+ @cached_property
865
+ def img2bpe_mapping_tensor(self):
866
+ mapping = torch.zeros(max(self.img2bpe.keys()) + 1, dtype=torch.int)
867
+ for k, v in self.img2bpe.items():
868
+ mapping[k] = v
869
+ return mapping
870
+
871
+ def convert_img2bpe(self, img_batch: List[torch.Tensor]) -> torch.Tensor:
872
+ device = img_batch.device
873
+ eol_row = torch.ones((img_batch.shape[0], 1), dtype=torch.int) * self.eol_token_id
874
+ img_tokens = self.img2bpe_mapping_tensor[img_batch.to("cpu")]
875
+ img_tokens = torch.cat([img_tokens, eol_row], dim=-1)
876
+ return img_tokens.to(device)
877
+
878
+ def convert_bpe2img(self, img_batch: torch.Tensor) -> torch.Tensor:
879
+ device = img_batch.device
880
+ img_batch = img_batch[..., :-1] # remove last row of EOL tokens
881
+ img_tokens = self.bpe2img_mapping_tensor[img_batch.to("cpu")]
882
+ return img_tokens.to(device)
883
+
884
+
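+ # Illustrative note (not part of the upstream file): `img2bpe` parses the integer embedded
+ # in each "<|visual token NNNNNN|>" name (token[-8:-2]) and maps it to that token's BPE id,
+ # so a grid of VQ code ids can be flattened into language-model token ids. `convert_img2bpe`
+ # also appends one EOL token per row, which is why the decoding path later reshapes to
+ # (height, width + 1). The ids below are hypothetical:
+ #
+ #   vocab_map = {"<|visual token 000000|>": 151854, "<|visual token 000001|>": 151855}
+ #   mapping = Emu3ImageVocabularyMapping({**vocab_map, "<|extra_200|>": 151846, "<image>": 151851})
+ #   mapping.img2bpe[1]  # -> 151855, the BPE id of "<|visual token 000001|>"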
885
+ class Emu3PreTrainedModel(ChameleonPreTrainedModel, Emu3VQVAE):
886
+ _no_split_modules = [
887
+ "Emu3DecoderLayer",
888
+ ]
889
+ _supports_flex_attn = True
890
+
891
+ def _init_weights(self, module):
892
+ std = self.config.get_text_config().initializer_range
893
+ if isinstance(module, Emu3VQVAE):
894
+ module.apply(module._init_weights)
895
+ elif isinstance(module, (nn.Linear, nn.Conv2d)):
896
+ module.weight.data.normal_(mean=0.0, std=std)
897
+ if module.bias is not None:
898
+ module.bias.data.zero_()
899
+ elif isinstance(module, nn.Embedding):
900
+ module.weight.data.normal_(mean=0.0, std=std)
901
+ if module.padding_idx is not None:
902
+ module.weight.data[module.padding_idx].zero_()
903
+
904
+
905
+ EMU3_TEXT_INPUTS_DOCSTRING = r"""
906
+ Args:
907
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
908
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
909
+ it.
910
+
911
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
912
+ [`PreTrainedTokenizer.__call__`] for details.
913
+
914
+ [What are input IDs?](../glossary#input-ids)
915
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
916
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
917
+
918
+ - 1 for tokens that are **not masked**,
919
+ - 0 for tokens that are **masked**.
920
+
921
+ [What are attention masks?](../glossary#attention-mask)
922
+
923
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
924
+ [`PreTrainedTokenizer.__call__`] for details.
925
+
926
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
927
+ `past_key_values`).
928
+
929
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
930
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
931
+ information on the default strategy.
932
+
933
+ - 1 indicates the head is **not masked**,
934
+ - 0 indicates the head is **masked**.
935
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
936
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
937
+ config.n_positions - 1]`.
938
+
939
+ [What are position IDs?](../glossary#position-ids)
940
+ past_key_values (`Cache`, *optional*):
941
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
942
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
943
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
944
+
945
+ Has to be an instance of [`~cache_utils.Cache`] instance, see our
946
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
947
+
948
+ The model will output the same cache type that is fed as input. If no `past_key_values` are passed, the
949
+ legacy cache format will be returned.
950
+
951
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
952
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
953
+ of shape `(batch_size, sequence_length)`.
954
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
955
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
956
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
957
+ model's internal embedding lookup matrix.
958
+ use_cache (`bool`, *optional*):
959
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
960
+ `past_key_values`).
961
+ output_attentions (`bool`, *optional*):
962
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
963
+ tensors for more detail.
964
+ output_hidden_states (`bool`, *optional*):
965
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
966
+ more detail.
967
+ return_dict (`bool`, *optional*):
968
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
969
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
970
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
971
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
972
+ the complete sequence length.
973
+ """
974
+
975
+
976
+ EMU3_INPUTS_DOCSTRING = r"""
977
+ Args:
978
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
979
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
980
+ it.
981
+
982
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
983
+ [`PreTrainedTokenizer.__call__`] for details.
984
+
985
+ [What are input IDs?](../glossary#input-ids)
986
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, max_num_images, max_num_tiles, channels, image_size, image_size)`):
987
+ The tensors corresponding to the input images. Pixel values can be obtained using
988
+ [`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
989
+ [`Emu3ImageProcessor`] for processing images).
990
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
991
+ The sizes of the images in the batch, being (height, width) for each image. Image sizes can be obtained using
992
+ [`AutoImageProcessor`]. See [`Emu3ImageProcessor.__call__`] for details ([`Emu3Processor`] uses
993
+ [`Emu3ImageProcessor`] for processing images).
994
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
995
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
996
+
997
+ - 1 for tokens that are **not masked**,
998
+ - 0 for tokens that are **masked**.
999
+
1000
+ [What are attention masks?](../glossary#attention-mask)
1001
+
1002
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
1003
+ [`PreTrainedTokenizer.__call__`] for details.
1004
+
1005
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
1006
+ `past_key_values`).
1007
+
1008
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1009
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1010
+ information on the default strategy.
1011
+
1012
+ - 1 indicates the head is **not masked**,
1013
+ - 0 indicates the head is **masked**.
1014
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1015
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1016
+ config.n_positions - 1]`.
1017
+
1018
+ [What are position IDs?](../glossary#position-ids)
1019
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1020
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1021
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
1022
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1023
+
1024
+ Has to be an instance of [`~cache_utils.Cache`] instance, see our
1025
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
1026
+
1027
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1028
+ legacy cache format will be returned.
1029
+
1030
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1031
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1032
+ of shape `(batch_size, sequence_length)`.
1033
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1034
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1035
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1036
+ model's internal embedding lookup matrix.
1037
+ use_cache (`bool`, *optional*):
1038
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1039
+ `past_key_values`).
1040
+ output_attentions (`bool`, *optional*):
1041
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1042
+ tensors for more detail.
1043
+ output_hidden_states (`bool`, *optional*):
1044
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1045
+ more detail.
1046
+ return_dict (`bool`, *optional*):
1047
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1048
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
1049
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
1050
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
1051
+ the complete sequence length.
1052
+ """
1053
+
1054
+
1055
+ class Emu3TextModel(LlamaModel, Emu3PreTrainedModel):
1056
+ def __init__(self, config: Emu3Config):
1057
+ super().__init__(config)
1058
+ self.layers = nn.ModuleList(
1059
+ [Emu3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
1060
+ )
1061
+
1062
+
1063
+ class Emu3ForCausalLM(LlamaForCausalLM, Emu3PreTrainedModel, GenerationMixin):
1064
+ config_class = Emu3TextConfig
1065
+
1066
+ def __init__(self, config):
1067
+ super().__init__(config)
1068
+ self.model = Emu3TextModel(config)
1069
+
1070
+ @add_start_docstrings_to_model_forward(EMU3_TEXT_INPUTS_DOCSTRING)
1071
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class="Emu3TextConfig")
1072
+ def forward(**super_kwargs):
1073
+ r"""
1074
+ Args:
1075
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1076
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1077
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1078
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1079
+ num_logits_to_keep (`int`, *optional*):
1080
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1081
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1082
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1083
+
1084
+ Returns:
1085
+
1086
+ Example:
1087
+
1088
+ ```python
1089
+ >>> from transformers import Emu3Processor, Emu3ForCausalLM
1090
+ >>> import torch
1091
+ >>> import requests
1092
+ >>> from PIL import Image
1093
+
1094
+ >>> model = Emu3ForCausalLM.from_pretrained("Emu3-community/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
1095
+ >>> processor = Emu3Processor.from_pretrained("Emu3-community/Emu3-Chat-hf")
1096
+
1097
+ >>> inputs = processor(text=["Can you write me a poem about winter."], return_tensors="pt").to(model.device)
1098
+
1099
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
1100
+ >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1101
+ ```"""
1102
+ super().forward()
1103
+
1104
+
1105
+ class Emu3ForConditionalGeneration(Emu3PreTrainedModel, GenerationMixin):
1106
+ def __init__(self, config):
1107
+ super().__init__(config)
1108
+ self.text_model = Emu3ForCausalLM._from_config(config.text_config)
1109
+ self.vqmodel = Emu3VQVAE(config.vq_config)
1110
+ self.vocabulary_mapping = Emu3ImageVocabularyMapping(config.vocabulary_map)
1111
+
1112
+ # Initialize weights and apply final processing
1113
+ self.post_init()
1114
+
1115
+ def get_input_embeddings(self):
1116
+ return self.text_model.get_input_embeddings()
1117
+
1118
+ def set_input_embeddings(self, value):
1119
+ self.text_model.set_input_embeddings(value)
1120
+
1121
+ def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):
1122
+ """
1123
+ Tokenizes images into discrete tokens with VQGAN module. Converts
1124
+ obtained image tokens into BPE tokens and wraps with "boi" and "eoi"
1125
+ special tokens.
1126
+
1127
+ Args:
1128
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1129
+ The tensors corresponding to the input images.
1130
+ image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):
1131
+ The sizes of the images in the batch, being (height, width) for each image.
1132
+ """
1133
+ image_tokens_list = self.vqmodel.encode(pixel_values, image_sizes)
1134
+ bpe_tokens_list = [self.vocabulary_mapping.convert_img2bpe(tokens).flatten() for tokens in image_tokens_list]
1135
+ bpe_tokens = torch.cat(bpe_tokens_list)
1136
+ return bpe_tokens
1137
+
1138
+ @torch.no_grad
1139
+ def decode_image_tokens(self, image_tokens: torch.LongTensor, height: int, width: int):
1140
+ """
1141
+ Decodes generated image tokens from language model to continuous pixel values
1142
+ with VQGAN module via upsampling.
1143
+
1144
+ Args:
1145
+ image_tokens (`torch.LongTensor` of shape `(batch_size, num_of_tokens)`):
1146
+ The tensors corresponding to the input images.
1147
+ height (`int`):
1148
+ Height of the generated image before upsampling.
1149
+ width (`int`):
1150
+ Width of the generated image before upsampling.
1151
+ """
1152
+ sequences = image_tokens[:, :-3].view(-1, height, width + 1)
1153
+ image_tokens = self.vocabulary_mapping.convert_bpe2img(sequences)
1154
+ image = self.vqmodel.decode(image_tokens)
1155
+ return image
1156
+
1157
+ @add_start_docstrings_to_model_forward(EMU3_INPUTS_DOCSTRING)
1158
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1159
+ def forward(
1160
+ self,
1161
+ input_ids: torch.LongTensor = None,
1162
+ pixel_values: torch.FloatTensor = None,
1163
+ image_sizes: torch.Tensor = None,
1164
+ attention_mask: Optional[torch.Tensor] = None,
1165
+ position_ids: Optional[torch.LongTensor] = None,
1166
+ past_key_values: Optional[Cache] = None,
1167
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1168
+ use_cache: Optional[bool] = None,
1169
+ output_attentions: Optional[bool] = None,
1170
+ output_hidden_states: Optional[bool] = None,
1171
+ return_dict: Optional[bool] = None,
1172
+ cache_position: Optional[torch.LongTensor] = None,
1173
+ labels: Optional[torch.LongTensor] = None,
1174
+ num_logits_to_keep: int = 0,
1175
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1176
+ r"""
1177
+ Args:
1178
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1179
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1180
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1181
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1182
+ num_logits_to_keep (`int`, *optional*):
1183
+ Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
1184
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
1185
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
1186
+
1187
+ Returns:
1188
+
1189
+ Example:
1190
+
1191
+ ```python
1192
+ >>> from transformers import Emu3Processor, Emu3ForConditionalGeneration
1193
+ >>> import torch
1194
+ >>> import requests
1195
+ >>> from PIL import Image
1196
+
1197
+ >>> model = Emu3ForConditionalGeneration.from_pretrained("Emu3-community/Emu3-Chat-hf", torch_dtype=torch.bfloat16)
1198
+ >>> processor = Emu3Processor.from_pretrained("Emu3-community/Emu3-Chat-hf")
1199
+
1200
+ >>> conversation = [
1201
+ ... {
1202
+ ... "role": "system",
1203
+ ... "content": [
1204
+ ... {"type": "text", "text": "You are a helpful assistant."},
1205
+ ... ],
1206
+ ... },
1207
+ ... {
1208
+ ... "role": "user",
1209
+ ... "content": [
1210
+ ... {"type": "image"},
1211
+ ... {"type": "text", "text": "Please describe the image."},
1212
+ ... ],
1213
+ ... },
1214
+ ... ]
1215
+
1216
+ >>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
1217
+ >>> image = Image.open(requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw)
1218
+
1219
+ >>> inputs = processor(images=[image], text=[prompt], return_tensors="pt").to(model.device, torch.bfloat16)
1220
+
1221
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
1222
+ >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
1223
+ ```"""
1224
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1225
+ output_hidden_states = (
1226
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1227
+ )
1228
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1229
+
1230
+ if (input_ids is None) ^ (inputs_embeds is not None):
1231
+ raise ValueError(
1232
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
1233
+ )
1234
+
1235
+ if pixel_values is not None and inputs_embeds is not None:
1236
+ raise ValueError(
1237
+ "You cannot specify both pixel_values and inputs_embeds at the same time, and must specify either one"
1238
+ )
1239
+
1240
+ if pixel_values is not None:
1241
+ image_tokens = self.get_image_tokens(pixel_values, image_sizes)
1242
+ special_image_mask = input_ids == self.vocabulary_mapping.image_token_id
1243
+ image_tokens = image_tokens.to(input_ids.device, input_ids.dtype)
1244
+ input_ids = input_ids.masked_scatter(special_image_mask, image_tokens)
1245
+
1246
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1247
+ outputs = self.text_model(
1248
+ input_ids=input_ids,
1249
+ attention_mask=attention_mask,
1250
+ position_ids=position_ids,
1251
+ past_key_values=past_key_values,
1252
+ inputs_embeds=inputs_embeds,
1253
+ use_cache=use_cache,
1254
+ output_attentions=output_attentions,
1255
+ output_hidden_states=output_hidden_states,
1256
+ return_dict=return_dict,
1257
+ cache_position=cache_position,
1258
+ num_logits_to_keep=num_logits_to_keep,
1259
+ )
1260
+
1261
+ return outputs
1262
+
1263
+
1264
+ __all__ = [
1265
+ "Emu3ForConditionalGeneration",
1266
+ "Emu3ForCausalLM",
1267
+ "Emu3TextModel",
1268
+ "Emu3PreTrainedModel",
1269
+ "Emu3VQVAE",
1270
+ ]
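To make the image-token bookkeeping in `get_image_tokens`/`decode_image_tokens` concrete, here is a small, model-free sketch of the shapes involved. It assumes the 1:1 default (`image_area=518400`, spatial factor 8) from `processing_emu3.py` below and treats the three trailing special tokens as opaque; only the arithmetic mirrors the code above.

```python
import torch

# token-grid size the processor requests for a 1:1 image with image_area=518400
height, width, spatial_factor = 90, 90, 8
num_image_tokens = height * (width + 1)      # one extra token per row, matching the processor

# stand-in for the ids a generate() call would append: grid tokens plus 3 trailing specials
generated = torch.arange(num_image_tokens + 3).unsqueeze(0)

# the same reshape decode_image_tokens applies before handing the grid to the VQ decoder
grid = generated[:, :-3].view(-1, height, width + 1)
print(grid.shape)                                        # torch.Size([1, 90, 91])
print(height * spatial_factor, width * spatial_factor)   # 720 720 -> decoded image size in pixels
```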
janus/lib/python3.10/site-packages/transformers/models/emu3/processing_emu3.py ADDED
@@ -0,0 +1,217 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ from ...image_processing_utils import BatchFeature
20
+ from ...image_utils import ImageInput
21
+ from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
22
+ from ...tokenization_utils_base import PreTokenizedInput, TextInput
23
+
24
+
25
+ class Emu3TextKwargs(TextKwargs, total=False):
26
+ return_for_image_generation: bool
27
+
28
+
29
+ class Emu3ImagesKwargs(ImagesKwargs, total=False):
30
+ ratio: str
31
+ image_area: int
32
+
33
+
34
+ class Emu3ProcessorKwargs(ProcessingKwargs, total=False):
35
+ text_kwargs: Emu3TextKwargs
36
+ images_kwargs: Emu3ImagesKwargs
37
+ _defaults = {
38
+ "text_kwargs": {
39
+ "return_for_image_generation": False,
40
+ },
41
+ "images_kwargs": {
42
+ "ratio": "1:1",
43
+ "image_area": 518400,
44
+ },
45
+ }
46
+
47
+
48
+ class Emu3Processor(ProcessorMixin):
49
+ r"""
50
+ Constructs an Emu3 processor which wraps an Emu3 image processor and a GPT2 tokenizer into a single
51
+ processor.
52
+
53
+ [`Emu3Processor`] offers all the functionalities of [`Emu3ImageProcessor`] and [`GPT2TokenizerFast`].
54
+ See the [`~Emu3Processor.__call__`] and [`~Emu3Processor.decode`] for more information.
55
+
56
+ Args:
57
+ image_processor ([`Emu3ImageProcessor`]):
58
+ The image processor is a required input.
59
+ tokenizer ([`Emu3TokenizerFast`]):
60
+ The tokenizer is a required input.
61
+ chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
62
+ in a chat into a tokenizable string.
63
+ """
64
+
65
+ attributes = ["image_processor", "tokenizer"]
66
+ tokenizer_class = ("GPT2Tokenizer", "GPT2TokenizerFast")
67
+ image_processor_class = "Emu3ImageProcessor"
68
+
69
+ def __init__(
70
+ self,
71
+ image_processor,
72
+ tokenizer,
73
+ chat_template=None,
74
+ **kwargs,
75
+ ):
76
+ self.image_token = tokenizer.image_token # image_token as placeholder to be replaced by vq-vae tokens
77
+ self.image_start_token = tokenizer.boi_token # "<|image start|>" fixed tokens for start and end of image
78
+ self.image_end_token = tokenizer.eoi_token # "<|image end|>"
79
+ self.fake_token_around_image = tokenizer.image_wrapper_token # "<|image token|>" every image starts with it
80
+ self.eof_token = tokenizer.eof_token # "<|extra_201|>"
81
+ self.bos_token = tokenizer.bos_token
82
+ self.downsample_ratio = 8
83
+ super().__init__(image_processor, tokenizer, chat_template=chat_template)
84
+
85
+ def __call__(
86
+ self,
87
+ images: Optional[ImageInput] = None,
88
+ text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
89
+ audio=None,
90
+ videos=None,
91
+ **kwargs: Unpack[Emu3ProcessorKwargs],
92
+ ) -> BatchFeature:
93
+ """
94
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
95
+ and `kwargs` arguments to Emu3TokenizerFast's [`~Emu3TokenizerFast.__call__`] if `text` is not `None` to encode
96
+ the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
97
+ Emu3ImageProcessor's [`~Emu3ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
98
+ of the above two methods for more information.
99
+
100
+ Args:
101
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
102
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
103
+ tensor. Both channels-first and channels-last formats are supported.
104
+ text (`str`, `List[str]`, `List[List[str]]`):
105
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
106
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
107
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
108
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
109
+ If set, will return tensors of a particular framework. Acceptable values are:
110
+
111
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
112
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
113
+ - `'np'`: Return NumPy `np.ndarray` objects.
114
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
115
+
116
+ Returns:
117
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
118
+
119
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
120
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
121
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
122
+ `None`).
123
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
124
+ """
125
+ # check if images and text inputs are reversed for BC
126
+
127
+ if isinstance(text, str):
128
+ text = [text]
129
+ elif not isinstance(text, list) or not isinstance(text[0], str):
130
+ raise TypeError("Invalid input text. Please provide a string, or a list of strings")
131
+
132
+ output_kwargs = self._merge_kwargs(
133
+ Emu3ProcessorKwargs,
134
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
135
+ **kwargs,
136
+ )
137
+ return_for_image_generation = output_kwargs["text_kwargs"].pop("return_for_image_generation", False)
138
+ ratio = output_kwargs["images_kwargs"].pop("ratio", None)
139
+ image_area = output_kwargs["images_kwargs"].pop("image_area", None)
140
+
141
+ if return_for_image_generation and images is not None:
142
+ raise ValueError("You should not provide `images` when `return_for_image_generation=True`")
143
+
144
+ if not return_for_image_generation and text is None and images is None:
145
+ raise ValueError("You must provide either text or images when `return_for_image_generation=False`")
146
+
147
+ image_features = {}
148
+ image_start_tokens = f"{self.image_start_token}"
149
+ image_end_tokens = f"{self.eof_token}{self.image_end_token}"
150
+
151
+ # generate text from image + text input, so we add placeholders for image tokens
152
+ if not return_for_image_generation and images is not None:
153
+ image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
154
+ image_sizes = iter(image_features.image_sizes)
155
+
156
+ prompt_strings = []
157
+ for sample in text:
158
+ while self.image_token in sample:
159
+ image_size = next(image_sizes)
160
+ height, width = image_size
161
+ height = height // self.downsample_ratio
162
+ width = width // self.downsample_ratio
163
+ image_seq_length = height * (width + 1) # +1 for extra row when converting to BPE in modeling code
164
+
165
+ image_placeholder = f"{image_start_tokens}{height}*{width}{self.fake_token_around_image}{'<placeholder>' * image_seq_length}{image_end_tokens}"
166
+ sample = sample.replace(self.image_token, image_placeholder, 1)
167
+ sample = f"{self.bos_token}{sample}" # add BOS because PT tokenizer doesn't add it
168
+ prompt_strings.append(sample)
169
+ text = [sample.replace("<placeholder>", self.image_token) for sample in prompt_strings]
170
+
171
+ # generate image from text input, so we add begin-of-image tokens from where image generation starts
172
+ elif return_for_image_generation:
173
+ height, width = self.calculate_generate_size(ratio, image_area, self.downsample_ratio)
174
+ image_prompt = f"{image_start_tokens}{height}*{width}{self.fake_token_around_image}"
175
+ text = [f"{self.bos_token}{sample}{image_prompt}" for sample in text]
176
+ image_features["image_sizes"] = [[height, width]] * len(text)
177
+
178
+ # else just generate from text-only input, and we do no special treatment for text
179
+ data = self.tokenizer(text, **output_kwargs["text_kwargs"])
180
+ data.update(**image_features)
181
+
182
+ return BatchFeature(data=data, tensor_type=output_kwargs["common_kwargs"]["return_tensors"])
183
+
184
+ def calculate_generate_size(self, ratio, image_area, spatial_factor):
185
+ width, height = map(int, ratio.split(":"))
186
+ current_area = width * height
187
+ target_ratio = (image_area / current_area) ** 0.5
188
+
189
+ token_height = int(round(height * target_ratio / spatial_factor))
190
+ token_width = int(round(width * target_ratio / spatial_factor))
191
+ return token_height, token_width
192
+
193
+ def postprocess(self, images: ImageInput, **kwargs):
194
+ return self.image_processor.postprocess(images, **kwargs)
195
+
196
+ def batch_decode(self, *args, **kwargs):
197
+ """
198
+ This method forwards all its arguments to Emu3TokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
199
+ refer to the docstring of this method for more information.
200
+ """
201
+ return self.tokenizer.batch_decode(*args, **kwargs)
202
+
203
+ def decode(self, *args, **kwargs):
204
+ """
205
+ This method forwards all its arguments to Emu3TokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
206
+ the docstring of this method for more information.
207
+ """
208
+ return self.tokenizer.decode(*args, **kwargs)
209
+
210
+ @property
211
+ def model_input_names(self):
212
+ tokenizer_input_names = self.tokenizer.model_input_names
213
+ image_processor_input_names = self.image_processor.model_input_names
214
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
215
+
216
+
217
+ __all__ = ["Emu3Processor"]
janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/configuration_lilt.cpython-310.pyc ADDED
Binary file (5.89 kB).
 
janus/lib/python3.10/site-packages/transformers/models/lilt/__pycache__/modeling_lilt.cpython-310.pyc ADDED
Binary file (34.6 kB).
 
janus/lib/python3.10/site-packages/transformers/models/lxmert/__init__.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_lxmert import *
22
+ from .modeling_lxmert import *
23
+ from .modeling_tf_lxmert import *
24
+ from .tokenization_lxmert import *
25
+ from .tokenization_lxmert_fast import *
26
+ else:
27
+ import sys
28
+
29
+ _file = globals()["__file__"]
30
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
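The `_LazyModule` indirection above defers the heavy submodule imports until an attribute is actually requested; under `TYPE_CHECKING` the real imports still run so static analyzers see the symbols. The sketch below only illustrates that general pattern with PEP 562's module-level `__getattr__`; it is not the actual `_LazyModule` implementation, and the attribute map is a made-up example.

```python
# lazy_demo.py -- illustrative only; transformers uses its own _LazyModule class instead
import importlib

_ATTR_TO_MODULE = {"LxmertConfig": "configuration_lxmert"}   # attribute -> defining submodule

def __getattr__(name):
    # PEP 562: called when `name` is not found as a normal module attribute
    if name in _ATTR_TO_MODULE:
        submodule = importlib.import_module(f".{_ATTR_TO_MODULE[name]}", __package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```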
janus/lib/python3.10/site-packages/transformers/models/lxmert/__pycache__/tokenization_lxmert.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
janus/lib/python3.10/site-packages/transformers/models/lxmert/configuration_lxmert.py ADDED
@@ -0,0 +1,169 @@
1
+ # coding=utf-8
2
+ # Copyright 2018, Hao Tan, Mohit Bansal
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """LXMERT model configuration"""
16
+
17
+ from ...configuration_utils import PretrainedConfig
18
+ from ...utils import logging
19
+
20
+
21
+ logger = logging.get_logger(__name__)
22
+
23
+
24
+ class LxmertConfig(PretrainedConfig):
25
+ r"""
26
+ This is the configuration class to store the configuration of a [`LxmertModel`] or a [`TFLxmertModel`]. It is used
27
+ to instantiate a LXMERT model according to the specified arguments, defining the model architecture. Instantiating
28
+ a configuration with the defaults will yield a similar configuration to that of the Lxmert
29
+ [unc-nlp/lxmert-base-uncased](https://huggingface.co/unc-nlp/lxmert-base-uncased) architecture.
30
+
31
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
32
+ documentation from [`PretrainedConfig`] for more information.
33
+
34
+
35
+ Args:
36
+ vocab_size (`int`, *optional*, defaults to 30522):
37
+ Vocabulary size of the LXMERT model. Defines the number of different tokens that can be represented by the
38
+ `inputs_ids` passed when calling [`LxmertModel`] or [`TFLxmertModel`].
39
+ hidden_size (`int`, *optional*, defaults to 768):
40
+ Dimensionality of the encoder layers and the pooler layer.
41
+ num_attention_heads (`int`, *optional*, defaults to 12):
42
+ Number of attention heads for each attention layer in the Transformer encoder.
43
+ num_qa_labels (`int`, *optional*, defaults to 9500):
44
+ This represents the total number of different question answering (QA) labels there are. If using more than
45
+ one dataset with QA, the user will need to account for the total number of labels that all of the datasets
46
+ have in total.
47
+ num_object_labels (`int`, *optional*, defaults to 1600):
48
+ This represents the total number of semantically unique objects that lxmert will be able to classify a
49
+ pooled-object feature as belonging to.
50
+ num_attr_labels (`int`, *optional*, defaults to 400):
51
+ This represents the total number of semantically unique attributes that lxmert will be able to classify a
52
+ pooled-object feature as possessing.
53
+ intermediate_size (`int`, *optional*, defaults to 3072):
54
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
55
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
61
+ The dropout ratio for the attention probabilities.
62
+ max_position_embeddings (`int`, *optional*, defaults to 512):
63
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
64
+ just in case (e.g., 512 or 1024 or 2048).
65
+ type_vocab_size (`int`, *optional*, defaults to 2):
66
+ The vocabulary size of the *token_type_ids* passed into [`BertModel`].
67
+ initializer_range (`float`, *optional*, defaults to 0.02):
68
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
69
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
70
+ The epsilon used by the layer normalization layers.
71
+ l_layers (`int`, *optional*, defaults to 9):
72
+ Number of hidden layers in the Transformer language encoder.
73
+ x_layers (`int`, *optional*, defaults to 5):
74
+ Number of hidden layers in the Transformer cross modality encoder.
75
+ r_layers (`int`, *optional*, defaults to 5):
76
+ Number of hidden layers in the Transformer visual encoder.
77
+ visual_feat_dim (`int`, *optional*, defaults to 2048):
78
+ This represents the last dimension of the pooled-object features used as input for the model, representing
79
+ the size of each object feature itself.
80
+ visual_pos_dim (`int`, *optional*, defaults to 4):
81
+ This represents the number of spatial features that are mixed into the visual features. The default is set
82
+ to 4 because most commonly this will represent the location of a bounding box. i.e., (x, y, width, height)
83
+ visual_loss_normalizer (`float`, *optional*, defaults to 6.67):
84
+ This represents the scaling factor in which each visual loss is multiplied by if during pretraining, one
85
+ decided to train with multiple vision-based loss objectives.
86
+ task_matched (`bool`, *optional*, defaults to `True`):
87
+ This task is used for sentence-image matching. If the sentence correctly describes the image the label will
88
+ be 1. If the sentence does not correctly describe the image, the label will be 0.
89
+ task_mask_lm (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to add masked language modeling (as used in pretraining models such as BERT) to the loss
91
+ objective.
92
+ task_obj_predict (`bool`, *optional*, defaults to `True`):
93
+ Whether or not to add object prediction, attribute prediction and feature regression to the loss objective.
94
+ task_qa (`bool`, *optional*, defaults to `True`):
95
+ Whether or not to add the question-answering loss to the objective
96
+ visual_obj_loss (`bool`, *optional*, defaults to `True`):
97
+ Whether or not to calculate the object-prediction loss objective
98
+ visual_attr_loss (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to calculate the attribute-prediction loss objective
100
+ visual_feat_loss (`bool`, *optional*, defaults to `True`):
101
+ Whether or not to calculate the feature-regression loss objective
102
+ """
103
+
104
+ model_type = "lxmert"
105
+ attribute_map = {}
106
+
107
+ def __init__(
108
+ self,
109
+ vocab_size=30522,
110
+ hidden_size=768,
111
+ num_attention_heads=12,
112
+ num_qa_labels=9500,
113
+ num_object_labels=1600,
114
+ num_attr_labels=400,
115
+ intermediate_size=3072,
116
+ hidden_act="gelu",
117
+ hidden_dropout_prob=0.1,
118
+ attention_probs_dropout_prob=0.1,
119
+ max_position_embeddings=512,
120
+ type_vocab_size=2,
121
+ initializer_range=0.02,
122
+ layer_norm_eps=1e-12,
123
+ l_layers=9,
124
+ x_layers=5,
125
+ r_layers=5,
126
+ visual_feat_dim=2048,
127
+ visual_pos_dim=4,
128
+ visual_loss_normalizer=6.67,
129
+ task_matched=True,
130
+ task_mask_lm=True,
131
+ task_obj_predict=True,
132
+ task_qa=True,
133
+ visual_obj_loss=True,
134
+ visual_attr_loss=True,
135
+ visual_feat_loss=True,
136
+ **kwargs,
137
+ ):
138
+ self.vocab_size = vocab_size
139
+ self.hidden_size = hidden_size
140
+ self.num_attention_heads = num_attention_heads
141
+ self.hidden_act = hidden_act
142
+ self.intermediate_size = intermediate_size
143
+ self.hidden_dropout_prob = hidden_dropout_prob
144
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
145
+ self.max_position_embeddings = max_position_embeddings
146
+ self.type_vocab_size = type_vocab_size
147
+ self.initializer_range = initializer_range
148
+ self.layer_norm_eps = layer_norm_eps
149
+ self.num_qa_labels = num_qa_labels
150
+ self.num_object_labels = num_object_labels
151
+ self.num_attr_labels = num_attr_labels
152
+ self.l_layers = l_layers
153
+ self.x_layers = x_layers
154
+ self.r_layers = r_layers
155
+ self.visual_feat_dim = visual_feat_dim
156
+ self.visual_pos_dim = visual_pos_dim
157
+ self.visual_loss_normalizer = visual_loss_normalizer
158
+ self.task_matched = task_matched
159
+ self.task_mask_lm = task_mask_lm
160
+ self.task_obj_predict = task_obj_predict
161
+ self.task_qa = task_qa
162
+ self.visual_obj_loss = visual_obj_loss
163
+ self.visual_attr_loss = visual_attr_loss
164
+ self.visual_feat_loss = visual_feat_loss
165
+ self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
166
+ super().__init__(**kwargs)
167
+
168
+
169
+ __all__ = ["LxmertConfig"]
janus/lib/python3.10/site-packages/transformers/models/lxmert/modeling_lxmert.py ADDED
@@ -0,0 +1,1461 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 Hao Tan, Mohit Bansal, and the HuggingFace team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch LXMERT model."""
16
+
17
+ import math
18
+ import os
19
+ import warnings
20
+ from dataclasses import dataclass
21
+ from typing import Dict, Optional, Tuple, Union
22
+
23
+ import torch
24
+ from torch import nn
25
+ from torch.nn import CrossEntropyLoss, SmoothL1Loss
26
+
27
+ from ...activations import ACT2FN, gelu
28
+ from ...modeling_utils import PreTrainedModel
29
+ from ...utils import (
30
+ ModelOutput,
31
+ add_code_sample_docstrings,
32
+ add_start_docstrings,
33
+ add_start_docstrings_to_model_forward,
34
+ logging,
35
+ replace_return_docstrings,
36
+ )
37
+ from .configuration_lxmert import LxmertConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
43
+ _CONFIG_FOR_DOC = "LxmertConfig"
44
+
45
+
46
+ class GeLU(nn.Module):
47
+ def __init__(self):
48
+ super().__init__()
49
+
50
+ def forward(self, x):
51
+ return gelu(x)
52
+
53
+
54
+ @dataclass
55
+ class LxmertModelOutput(ModelOutput):
56
+ """
57
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
58
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
59
+ encoder")
60
+
61
+
62
+ Args:
63
+ language_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
64
+ Sequence of hidden-states at the output of the last layer of the language encoder.
65
+ vision_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
66
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
67
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
68
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
69
+ by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining.
70
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
71
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
72
+ shape `(batch_size, sequence_length, hidden_size)`.
73
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
74
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
75
+ shape `(batch_size, sequence_length, hidden_size)`.
76
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
77
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
78
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
79
+ the self-attention heads.
80
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
81
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
82
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
83
+ the self-attention heads.
84
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
85
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
86
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
87
+ the self-attention heads.
88
+ """
89
+
90
+ language_output: Optional[torch.FloatTensor] = None
91
+ vision_output: Optional[torch.FloatTensor] = None
92
+ pooled_output: Optional[torch.FloatTensor] = None
93
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
94
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
95
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
96
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
97
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
98
+
99
+
100
+ @dataclass
101
+ class LxmertForQuestionAnsweringOutput(ModelOutput):
102
+ """
103
+ Output type of [`LxmertForQuestionAnswering`].
104
+
105
+ Args:
106
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
107
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
108
+ (classification) loss.
109
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`, *optional*):
110
+ Prediction scores of question answering objective (classification).
111
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
112
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
113
+ shape `(batch_size, sequence_length, hidden_size)`.
114
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
115
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
116
+ shape `(batch_size, sequence_length, hidden_size)`.
117
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
118
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
119
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
120
+ the self-attention heads.
121
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
122
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
123
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
124
+ the self-attention heads.
125
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
126
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
127
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
128
+ the self-attention heads.
129
+ """
130
+
131
+ loss: Optional[torch.FloatTensor] = None
132
+ question_answering_score: Optional[torch.FloatTensor] = None
133
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
134
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
135
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
136
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
137
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
138
+
139
+
140
+ @dataclass
141
+ class LxmertForPreTrainingOutput(ModelOutput):
142
+ """
143
+ Output type of [`LxmertForPreTraining`].
144
+
145
+ Args:
146
+ loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
147
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
148
+ (classification) loss.
149
+ prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
150
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
151
+ cross_relationship_score (`torch.FloatTensor` of shape `(batch_size, 2)`):
152
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
153
+ continuation before SoftMax).
154
+ question_answering_score (`torch.FloatTensor` of shape `(batch_size, n_qa_answers)`):
155
+ Prediction scores of question answering objective (classification).
156
+ language_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
157
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
158
+ shape `(batch_size, sequence_length, hidden_size)`.
159
+ vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
160
+ Tuple of `torch.FloatTensor` (one for input features + one for the output of each cross-modality layer) of
161
+ shape `(batch_size, sequence_length, hidden_size)`.
162
+ language_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
163
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
164
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
165
+ the self-attention heads.
166
+ vision_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
167
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
168
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
169
+ the self-attention heads.
170
+ cross_encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
171
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
172
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
173
+ the self-attention heads.
174
+
175
+ """
176
+
177
+ loss: Optional[torch.FloatTensor] = None
178
+ prediction_logits: Optional[torch.FloatTensor] = None
179
+ cross_relationship_score: Optional[torch.FloatTensor] = None
180
+ question_answering_score: Optional[torch.FloatTensor] = None
181
+ language_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
182
+ vision_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
183
+ language_attentions: Optional[Tuple[torch.FloatTensor]] = None
184
+ vision_attentions: Optional[Tuple[torch.FloatTensor]] = None
185
+ cross_encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
186
+
187
+
188
+ def load_tf_weights_in_lxmert(model, config, tf_checkpoint_path):
189
+ """Load tf checkpoints in a pytorch model."""
190
+ try:
191
+ import re
192
+
193
+ import numpy as np
194
+ import tensorflow as tf
195
+ except ImportError:
196
+ logger.error(
197
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
198
+ "https://www.tensorflow.org/install/ for installation instructions."
199
+ )
200
+ raise
201
+ tf_path = os.path.abspath(tf_checkpoint_path)
202
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
203
+ # Load weights from TF model
204
+ init_vars = tf.train.list_variables(tf_path)
205
+ names = []
206
+ arrays = []
207
+ for name, shape in init_vars:
208
+ logger.info(f"Loading TF weight {name} with shape {shape}")
209
+ array = tf.train.load_variable(tf_path, name)
210
+ names.append(name)
211
+ arrays.append(array)
212
+
213
+ for name, array in zip(names, arrays):
214
+ name = name.split("/")
215
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
216
+ # which are not required for using pretrained model
217
+ if any(
218
+ n
219
+ in [
220
+ "adam_v",
221
+ "adam_m",
222
+ "AdamWeightDecayOptimizer",
223
+ "AdamWeightDecayOptimizer_1",
224
+ "global_step",
225
+ ]
226
+ for n in name
227
+ ):
228
+ logger.info(f"Skipping {'/'.join(name)}")
229
+ continue
230
+ pointer = model
231
+ for m_name in name:
232
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
233
+ scope_names = re.split(r"_(\d+)", m_name)
234
+ else:
235
+ scope_names = [m_name]
236
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
237
+ pointer = getattr(pointer, "weight")
238
+ elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
239
+ pointer = getattr(pointer, "bias")
240
+ elif scope_names[0] == "output_weights":
241
+ pointer = getattr(pointer, "weight")
242
+ elif scope_names[0] == "squad":
243
+ pointer = getattr(pointer, "classifier")
244
+ else:
245
+ try:
246
+ pointer = getattr(pointer, scope_names[0])
247
+ except AttributeError:
248
+ logger.info(f"Skipping {'/'.join(name)}")
249
+ continue
250
+ if len(scope_names) >= 2:
251
+ num = int(scope_names[1])
252
+ pointer = pointer[num]
253
+ if m_name[-11:] == "_embeddings":
254
+ pointer = getattr(pointer, "weight")
255
+ elif m_name == "kernel":
256
+ array = np.transpose(array)
257
+ try:
258
+ assert pointer.shape == array.shape
259
+ except AssertionError as e:
260
+ e.args += (pointer.shape, array.shape)
261
+ raise
262
+ logger.info(f"Initialize PyTorch weight {name}")
263
+ pointer.data = torch.from_numpy(array)
264
+ return model
265
+
266
+
267
+ class LxmertEmbeddings(nn.Module):
268
+ """Construct the embeddings from word, position and token_type embeddings."""
269
+
270
+ def __init__(self, config):
271
+ super().__init__()
272
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
273
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=0)
274
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size, padding_idx=0)
275
+
276
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
277
+ # any TensorFlow checkpoint file
278
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
279
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
280
+
281
+ def forward(self, input_ids, token_type_ids=None, inputs_embeds=None):
282
+ if input_ids is not None:
283
+ input_shape = input_ids.size()
284
+ device = input_ids.device
285
+ else:
286
+ input_shape = inputs_embeds.size()[:-1]
287
+ device = inputs_embeds.device
288
+ seq_length = input_shape[1]
289
+
290
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
291
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
292
+
293
+ if token_type_ids is None:
294
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
295
+
296
+ if inputs_embeds is None:
297
+ inputs_embeds = self.word_embeddings(input_ids)
298
+ position_embeddings = self.position_embeddings(position_ids)
299
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
300
+
301
+ embeddings = inputs_embeds + position_embeddings + token_type_embeddings
302
+ embeddings = self.LayerNorm(embeddings)
303
+ embeddings = self.dropout(embeddings)
304
+ return embeddings
305
+
306
+
307
+ class LxmertAttention(nn.Module):
308
+ def __init__(self, config, ctx_dim=None):
309
+ super().__init__()
310
+ if config.hidden_size % config.num_attention_heads != 0:
311
+ raise ValueError(
312
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
313
+ f"heads ({config.num_attention_heads})"
314
+ )
315
+ self.num_attention_heads = config.num_attention_heads
316
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
317
+ self.head_size = self.num_attention_heads * self.attention_head_size
318
+
319
+ # visual_dim = 2048
320
+ if ctx_dim is None:
321
+ ctx_dim = config.hidden_size
322
+ self.query = nn.Linear(config.hidden_size, self.head_size)
323
+ self.key = nn.Linear(ctx_dim, self.head_size)
324
+ self.value = nn.Linear(ctx_dim, self.head_size)
325
+
326
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
327
+
328
+ def transpose_for_scores(self, x):
329
+ new_x_shape = x.size()[:-1] + (
330
+ self.num_attention_heads,
331
+ self.attention_head_size,
332
+ )
333
+ x = x.view(new_x_shape)
334
+ return x.permute(0, 2, 1, 3)
335
+
336
+ def forward(self, hidden_states, context, attention_mask=None, output_attentions=False):
337
+ mixed_query_layer = self.query(hidden_states)
338
+ mixed_key_layer = self.key(context)
339
+ mixed_value_layer = self.value(context)
340
+
341
+ query_layer = self.transpose_for_scores(mixed_query_layer)
342
+ key_layer = self.transpose_for_scores(mixed_key_layer)
343
+ value_layer = self.transpose_for_scores(mixed_value_layer)
344
+
345
+ # Take the dot product between "query" and "key" to get the raw attention scores.
346
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
347
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
348
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
349
+ if attention_mask is not None:
350
+ attention_scores = attention_scores + attention_mask
351
+
352
+ # Normalize the attention scores to probabilities.
353
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
354
+
355
+ # This is actually dropping out entire tokens to attend to, which might
356
+ # seem a bit unusual, but is taken from the original Transformer paper.
357
+ attention_probs = self.dropout(attention_probs)
358
+
359
+ context_layer = torch.matmul(attention_probs, value_layer)
360
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
361
+ new_context_layer_shape = context_layer.size()[:-2] + (self.head_size,)
362
+ context_layer = context_layer.view(new_context_layer_shape)
363
+
364
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
365
+ return outputs
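The `transpose_for_scores` reshape and the scaled dot-product in `LxmertAttention.forward` can be traced with dummy tensors. The sketch below mirrors those steps outside the module; the sizes are the config defaults and the inputs are random, so it only demonstrates the shape bookkeeping, not the trained behaviour.

```python
import math
import torch

batch, seq_len, hidden_size, num_heads = 2, 7, 768, 12
head_dim = hidden_size // num_heads                       # 64

def split_heads(x):
    # (batch, seq, hidden) -> (batch, heads, seq, head_dim), as in transpose_for_scores
    return x.view(batch, seq_len, num_heads, head_dim).permute(0, 2, 1, 3)

q, k, v = (torch.randn(batch, seq_len, hidden_size) for _ in range(3))

scores = torch.matmul(split_heads(q), split_heads(k).transpose(-1, -2)) / math.sqrt(head_dim)
probs = torch.nn.functional.softmax(scores, dim=-1)
context = torch.matmul(probs, split_heads(v))             # (batch, heads, seq, head_dim)
context = context.permute(0, 2, 1, 3).contiguous().view(batch, seq_len, hidden_size)
print(context.shape)                                      # torch.Size([2, 7, 768])
```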
366
+
367
+
368
+ class LxmertAttentionOutput(nn.Module):
369
+ def __init__(self, config):
370
+ super().__init__()
371
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
372
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
373
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
374
+
375
+ def forward(self, hidden_states, input_tensor):
376
+ hidden_states = self.dense(hidden_states)
377
+ hidden_states = self.dropout(hidden_states)
378
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
379
+ return hidden_states
380
+
381
+
382
+ class LxmertCrossAttentionLayer(nn.Module):
383
+ def __init__(self, config):
384
+ super().__init__()
385
+ self.att = LxmertAttention(config)
386
+ self.output = LxmertAttentionOutput(config)
387
+
388
+ def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None, output_attentions=False):
389
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions=output_attentions)
390
+ if output_attentions:
391
+ attention_probs = output[1]
392
+ attention_output = self.output(output[0], input_tensor)
393
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
394
+ return outputs
395
+
396
+
397
+ class LxmertSelfAttentionLayer(nn.Module):
398
+ def __init__(self, config):
399
+ super().__init__()
400
+ self.self = LxmertAttention(config)
401
+ self.output = LxmertAttentionOutput(config)
402
+
403
+ def forward(self, input_tensor, attention_mask, output_attentions=False):
404
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
405
+ output = self.self(
406
+ input_tensor,
407
+ input_tensor,
408
+ attention_mask,
409
+ output_attentions=output_attentions,
410
+ )
411
+ if output_attentions:
412
+ attention_probs = output[1]
413
+ attention_output = self.output(output[0], input_tensor)
414
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
415
+ return outputs
416
+
417
+
418
+ class LxmertIntermediate(nn.Module):
419
+ def __init__(self, config):
420
+ super().__init__()
421
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
422
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
423
+
424
+ def forward(self, hidden_states):
425
+ hidden_states = self.dense(hidden_states)
426
+ hidden_states = self.intermediate_act_fn(hidden_states)
427
+ return hidden_states
428
+
429
+
430
+ class LxmertOutput(nn.Module):
431
+ def __init__(self, config):
432
+ super().__init__()
433
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
434
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
435
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
436
+
437
+ def forward(self, hidden_states, input_tensor):
438
+ hidden_states = self.dense(hidden_states)
439
+ hidden_states = self.dropout(hidden_states)
440
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
441
+ return hidden_states
442
+
443
+
444
+ class LxmertLayer(nn.Module):
445
+ def __init__(self, config):
446
+ super().__init__()
447
+ self.attention = LxmertSelfAttentionLayer(config)
448
+ self.intermediate = LxmertIntermediate(config)
449
+ self.output = LxmertOutput(config)
450
+
451
+ def forward(self, hidden_states, attention_mask=None, output_attentions=False):
452
+ outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
453
+ attention_output = outputs[0]
454
+ intermediate_output = self.intermediate(attention_output)
455
+ layer_output = self.output(intermediate_output, attention_output)
456
+ outputs = (layer_output,) + outputs[1:] # add attentions if we output them
457
+ return outputs
458
+
459
+
460
+ class LxmertXLayer(nn.Module):
461
+ def __init__(self, config):
462
+ super().__init__()
463
+ # The cross-attention Layer
464
+ self.visual_attention = LxmertCrossAttentionLayer(config)
465
+
466
+ # Self-attention Layers
467
+ self.lang_self_att = LxmertSelfAttentionLayer(config)
468
+ self.visn_self_att = LxmertSelfAttentionLayer(config)
469
+
470
+ # Intermediate and Output Layers (FFNs)
471
+ self.lang_inter = LxmertIntermediate(config)
472
+ self.lang_output = LxmertOutput(config)
473
+ self.visn_inter = LxmertIntermediate(config)
474
+ self.visn_output = LxmertOutput(config)
475
+
476
+ def cross_att(
477
+ self,
478
+ lang_input,
479
+ lang_attention_mask,
480
+ visual_input,
481
+ visual_attention_mask,
482
+ output_x_attentions=False,
483
+ ):
484
+ # Cross Attention
485
+ lang_att_output = self.visual_attention(
486
+ lang_input,
487
+ visual_input,
488
+ ctx_att_mask=visual_attention_mask,
489
+ output_attentions=output_x_attentions,
490
+ )
491
+ visual_att_output = self.visual_attention(
492
+ visual_input,
493
+ lang_input,
494
+ ctx_att_mask=lang_attention_mask,
495
+ output_attentions=False,
496
+ )
497
+ return lang_att_output, visual_att_output
498
+
499
+ def self_att(self, lang_input, lang_attention_mask, visual_input, visual_attention_mask):
500
+ # Self Attention
501
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions=False)
502
+ visual_att_output = self.visn_self_att(visual_input, visual_attention_mask, output_attentions=False)
503
+ return lang_att_output[0], visual_att_output[0]
504
+
505
+ def output_fc(self, lang_input, visual_input):
506
+ # FC layers
507
+ lang_inter_output = self.lang_inter(lang_input)
508
+ visual_inter_output = self.visn_inter(visual_input)
509
+
510
+ # Layer output
511
+ lang_output = self.lang_output(lang_inter_output, lang_input)
512
+ visual_output = self.visn_output(visual_inter_output, visual_input)
513
+
514
+ return lang_output, visual_output
515
+
516
+ def forward(
517
+ self,
518
+ lang_feats,
519
+ lang_attention_mask,
520
+ visual_feats,
521
+ visual_attention_mask,
522
+ output_attentions=False,
523
+ ):
524
+ lang_att_output, visual_att_output = self.cross_att(
525
+ lang_input=lang_feats,
526
+ lang_attention_mask=lang_attention_mask,
527
+ visual_input=visual_feats,
528
+ visual_attention_mask=visual_attention_mask,
529
+ output_x_attentions=output_attentions,
530
+ )
531
+ attention_probs = lang_att_output[1:]
532
+ lang_att_output, visual_att_output = self.self_att(
533
+ lang_att_output[0],
534
+ lang_attention_mask,
535
+ visual_att_output[0],
536
+ visual_attention_mask,
537
+ )
538
+
539
+ lang_output, visual_output = self.output_fc(lang_att_output, visual_att_output)
540
+ return (
541
+ (
542
+ lang_output,
543
+ visual_output,
544
+ attention_probs[0],
545
+ )
546
+ if output_attentions
547
+ else (lang_output, visual_output)
548
+ )
549
+
550
+
551
+ class LxmertVisualFeatureEncoder(nn.Module):
552
+ def __init__(self, config):
553
+ super().__init__()
554
+ feat_dim = config.visual_feat_dim
555
+ pos_dim = config.visual_pos_dim
556
+
557
+ # Object feature encoding
558
+ self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
559
+ self.visn_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
560
+
561
+ # Box position encoding
562
+ self.box_fc = nn.Linear(pos_dim, config.hidden_size)
563
+ self.box_layer_norm = nn.LayerNorm(config.hidden_size, eps=1e-12)
564
+
565
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
566
+
567
+ def forward(self, visual_feats, visual_pos):
568
+ x = self.visn_fc(visual_feats)
569
+ x = self.visn_layer_norm(x)
570
+ y = self.box_fc(visual_pos)
571
+ y = self.box_layer_norm(y)
572
+ output = (x + y) / 2
573
+
574
+ output = self.dropout(output)
575
+ return output
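`LxmertVisualFeatureEncoder` simply projects the detector's region features and their box coordinates to `hidden_size`, layer-normalizes each stream, and averages the two. A standalone sketch with random stand-ins for detector outputs (36 regions is a common choice for LXMERT-style pipelines, not a requirement):

```python
import torch
from torch import nn

hidden_size, feat_dim, pos_dim, num_regions = 768, 2048, 4, 36

visn_fc, visn_ln = nn.Linear(feat_dim, hidden_size), nn.LayerNorm(hidden_size, eps=1e-12)
box_fc, box_ln = nn.Linear(pos_dim, hidden_size), nn.LayerNorm(hidden_size, eps=1e-12)

visual_feats = torch.randn(1, num_regions, feat_dim)   # e.g. Faster R-CNN region features
visual_pos = torch.rand(1, num_regions, pos_dim)       # normalized (x, y, w, h) boxes

# project, normalize, then average the two streams, as in the module above
fused = (visn_ln(visn_fc(visual_feats)) + box_ln(box_fc(visual_pos))) / 2
print(fused.shape)                                      # torch.Size([1, 36, 768])
```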
576
+
577
+
578
+ class LxmertEncoder(nn.Module):
579
+ def __init__(self, config):
580
+ super().__init__()
581
+
582
+ # Obj-level image embedding layer
583
+ self.visn_fc = LxmertVisualFeatureEncoder(config)
584
+ self.config = config
585
+
586
+ # Number of layers
587
+ self.num_l_layers = config.l_layers
588
+ self.num_x_layers = config.x_layers
589
+ self.num_r_layers = config.r_layers
590
+
591
+ # Layers
592
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
593
+ self.layer = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_l_layers)])
594
+ self.x_layers = nn.ModuleList([LxmertXLayer(config) for _ in range(self.num_x_layers)])
595
+ self.r_layers = nn.ModuleList([LxmertLayer(config) for _ in range(self.num_r_layers)])
596
+
597
+ def forward(
598
+ self,
599
+ lang_feats,
600
+ lang_attention_mask,
601
+ visual_feats,
602
+ visual_pos,
603
+ visual_attention_mask=None,
604
+ output_attentions=None,
605
+ ):
606
+ vision_hidden_states = ()
607
+ language_hidden_states = ()
608
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
609
+ language_attentions = () if output_attentions or self.config.output_attentions else None
610
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
611
+
612
+ visual_feats = self.visn_fc(visual_feats, visual_pos)
613
+
614
+ # Run language layers
615
+ for layer_module in self.layer:
616
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions=output_attentions)
617
+ lang_feats = l_outputs[0]
618
+ language_hidden_states = language_hidden_states + (lang_feats,)
619
+ if language_attentions is not None:
620
+ language_attentions = language_attentions + (l_outputs[1],)
621
+
622
+ # Run relational layers
623
+ for layer_module in self.r_layers:
624
+ v_outputs = layer_module(visual_feats, visual_attention_mask, output_attentions=output_attentions)
625
+ visual_feats = v_outputs[0]
626
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
627
+ if vision_attentions is not None:
628
+ vision_attentions = vision_attentions + (v_outputs[1],)
629
+
630
+ # Run cross-modality layers
631
+ for layer_module in self.x_layers:
632
+ x_outputs = layer_module(
633
+ lang_feats,
634
+ lang_attention_mask,
635
+ visual_feats,
636
+ visual_attention_mask,
637
+ output_attentions=output_attentions,
638
+ )
639
+ lang_feats, visual_feats = x_outputs[:2]
640
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
641
+ language_hidden_states = language_hidden_states + (lang_feats,)
642
+ if cross_encoder_attentions is not None:
643
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
644
+ visual_encoder_outputs = (
645
+ vision_hidden_states,
646
+ vision_attentions if output_attentions else None,
647
+ )
648
+ lang_encoder_outputs = (
649
+ language_hidden_states,
650
+ language_attentions if output_attentions else None,
651
+ )
652
+ return (
653
+ visual_encoder_outputs,
654
+ lang_encoder_outputs,
655
+ cross_encoder_attentions if output_attentions else None,
656
+ )
657
+
658
+
659
+ class LxmertPooler(nn.Module):
660
+ def __init__(self, config):
661
+ super(LxmertPooler, self).__init__()
662
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
663
+ self.activation = nn.Tanh()
664
+
665
+ def forward(self, hidden_states):
666
+ # We "pool" the model by simply taking the hidden state corresponding
667
+ # to the first token.
668
+ first_token_tensor = hidden_states[:, 0]
669
+ pooled_output = self.dense(first_token_tensor)
670
+ pooled_output = self.activation(pooled_output)
671
+ return pooled_output
672
+
673
+
674
+ class LxmertPredictionHeadTransform(nn.Module):
675
+ def __init__(self, config):
676
+ super(LxmertPredictionHeadTransform, self).__init__()
677
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
678
+ self.transform_act_fn = ACT2FN[config.hidden_act]
679
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
680
+
681
+ def forward(self, hidden_states):
682
+ hidden_states = self.dense(hidden_states)
683
+ hidden_states = self.transform_act_fn(hidden_states)
684
+ hidden_states = self.LayerNorm(hidden_states)
685
+ return hidden_states
686
+
687
+
688
+ class LxmertLMPredictionHead(nn.Module):
689
+ def __init__(self, config, lxmert_model_embedding_weights):
690
+ super(LxmertLMPredictionHead, self).__init__()
691
+ self.transform = LxmertPredictionHeadTransform(config)
692
+
693
+ # The output weights are the same as the input embeddings, but there is
694
+ # an output-only bias for each token.
695
+ self.decoder = nn.Linear(
696
+ lxmert_model_embedding_weights.size(1),
697
+ lxmert_model_embedding_weights.size(0),
698
+ bias=False,
699
+ )
700
+ self.decoder.weight = lxmert_model_embedding_weights
701
+ self.bias = nn.Parameter(torch.zeros(lxmert_model_embedding_weights.size(0)))
702
+
703
+ def forward(self, hidden_states):
704
+ hidden_states = self.transform(hidden_states)
705
+ hidden_states = self.decoder(hidden_states) + self.bias
706
+ return hidden_states
707
+
708
+
709
+ class LxmertVisualAnswerHead(nn.Module):
710
+ def __init__(self, config, num_labels):
711
+ super().__init__()
712
+ hid_dim = config.hidden_size
713
+ self.logit_fc = nn.Sequential(
714
+ nn.Linear(hid_dim, hid_dim * 2),
715
+ GeLU(),
716
+ nn.LayerNorm(hid_dim * 2, eps=1e-12),
717
+ nn.Linear(hid_dim * 2, num_labels),
718
+ )
719
+
720
+ def forward(self, hidden_states):
721
+ return self.logit_fc(hidden_states)
722
+
723
+
724
+ class LxmertVisualObjHead(nn.Module):
725
+ def __init__(self, config):
726
+ super().__init__()
727
+ self.transform = LxmertPredictionHeadTransform(config)
728
+ # Decide the use of visual losses
729
+ visual_losses = {}
730
+ if config.visual_obj_loss:
731
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
732
+ if config.visual_attr_loss:
733
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
734
+ if config.visual_feat_loss:
735
+ visual_losses["feat"] = {
736
+ "shape": (-1, config.visual_feat_dim),
737
+ "num": config.visual_feat_dim,
738
+ }
739
+ self.visual_losses = visual_losses
740
+
741
+ # The output weights are the same as the input embeddings, but there is
742
+ # an output-only bias for each token.
743
+ self.decoder_dict = nn.ModuleDict(
744
+ {key: nn.Linear(config.hidden_size, self.visual_losses[key]["num"]) for key in self.visual_losses}
745
+ )
746
+
747
+ def forward(self, hidden_states):
748
+ hidden_states = self.transform(hidden_states)
749
+ output = {}
750
+ for key in self.visual_losses:
751
+ output[key] = self.decoder_dict[key](hidden_states)
752
+ return output
753
+
754
+
755
+ class LxmertPreTrainingHeads(nn.Module):
756
+ def __init__(self, config, lxmert_model_embedding_weights):
757
+ super(LxmertPreTrainingHeads, self).__init__()
758
+ self.predictions = LxmertLMPredictionHead(config, lxmert_model_embedding_weights)
759
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
760
+
761
+ def forward(self, sequence_output, pooled_output):
762
+ prediction_scores = self.predictions(sequence_output)
763
+ seq_relationship_score = self.seq_relationship(pooled_output)
764
+ return prediction_scores, seq_relationship_score
765
+
766
+
767
+ class LxmertPreTrainedModel(PreTrainedModel):
768
+ """
769
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
770
+ models.
771
+ """
772
+
773
+ config_class = LxmertConfig
774
+ load_tf_weights = load_tf_weights_in_lxmert
775
+ base_model_prefix = "lxmert"
776
+ _supports_param_buffer_assignment = False
777
+
778
+ def _init_weights(self, module):
779
+ """Initialize the weights"""
780
+ if isinstance(module, nn.Linear):
781
+ # Slightly different from the TF version which uses truncated_normal for initialization
782
+ # cf https://github.com/pytorch/pytorch/pull/5617
783
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
784
+ if module.bias is not None:
785
+ module.bias.data.zero_()
786
+ elif isinstance(module, nn.Embedding):
787
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
788
+ if module.padding_idx is not None:
789
+ module.weight.data[module.padding_idx].zero_()
790
+ elif isinstance(module, nn.LayerNorm):
791
+ module.bias.data.zero_()
792
+ module.weight.data.fill_(1.0)
793
+
794
+
795
+ LXMERT_START_DOCSTRING = r"""
796
+
797
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
798
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
799
+ model, pretrained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
800
+ Genome, using a combination of masked language modeling, region-of-interest feature regression, cross-entropy loss
801
+ for question answering, attribute prediction, and object tag prediction.
802
+
803
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
804
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
805
+ etc.)
806
+
807
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
808
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
809
+ and behavior.
810
+
811
+ Parameters:
812
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
813
+ Initializing with a config file does not load the weights associated with the model, only the
814
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
815
+ """
816
+
817
+ LXMERT_INPUTS_DOCSTRING = r"""
818
+
819
+ Args:
820
+ input_ids (`torch.LongTensor` of shape `({0})`):
821
+ Indices of input sequence tokens in the vocabulary.
822
+
823
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
824
+ [`PreTrainedTokenizer.__call__`] for details.
825
+
826
+ [What are input IDs?](../glossary#input-ids)
827
+ visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
828
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes using a
829
+ Faster R-CNN model.
830
+
831
+ These are currently not provided by the transformers library.
832
+ visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
833
+ This input represents spatial features corresponding (via index) to the visual features. The
834
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
835
+ 1.
836
+
837
+ These are currently not provided by the transformers library.
838
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
839
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
840
+
841
+ - 1 for tokens that are **not masked**,
842
+ - 0 for tokens that are **masked**.
843
+
844
+ [What are attention masks?](../glossary#attention-mask)
845
+ visual_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
846
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
847
+
848
+ - 1 for tokens that are **not masked**,
849
+ - 0 for tokens that are **masked**.
850
+
851
+ [What are attention masks?](../glossary#attention-mask)
852
+ token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
853
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
854
+ 1]`:
855
+
856
+ - 0 corresponds to a *sentence A* token,
857
+ - 1 corresponds to a *sentence B* token.
858
+
859
+ [What are token type IDs?](../glossary#token-type-ids)
860
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
861
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
862
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
863
+ model's internal embedding lookup matrix.
864
+ output_attentions (`bool`, *optional*):
865
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
866
+ tensors for more detail.
867
+ output_hidden_states (`bool`, *optional*):
868
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
869
+ more detail.
870
+ return_dict (`bool`, *optional*):
871
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
872
+ """
873
+
874
+
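+ # Input-construction sketch (illustrative only): the ROI features and normalized boxes documented above are
+ # expected to come from an external detector such as Faster R-CNN. Assuming the config's default dims and a
+ # hypothetical 36 regions per image, placeholder inputs could look like:
+ #     visual_feats = torch.randn(batch_size, 36, config.visual_feat_dim)  # ROI-pooled object features
+ #     visual_pos = torch.rand(batch_size, 36, config.visual_pos_dim)      # boxes normalized to [0, 1]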
875
+ @add_start_docstrings(
876
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
877
+ LXMERT_START_DOCSTRING,
878
+ )
879
+ class LxmertModel(LxmertPreTrainedModel):
880
+ def __init__(self, config):
881
+ super().__init__(config)
882
+ self.embeddings = LxmertEmbeddings(config)
883
+ self.encoder = LxmertEncoder(config)
884
+ self.pooler = LxmertPooler(config)
885
+ # Initialize weights and apply final processing
886
+ self.post_init()
887
+
888
+ def get_input_embeddings(self):
889
+ return self.embeddings.word_embeddings
890
+
891
+ def set_input_embeddings(self, new_embeddings):
892
+ self.embeddings.word_embeddings = new_embeddings
893
+
894
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
895
+ @add_code_sample_docstrings(
896
+ checkpoint=_CHECKPOINT_FOR_DOC,
897
+ output_type=LxmertModelOutput,
898
+ config_class=_CONFIG_FOR_DOC,
899
+ )
900
+ def forward(
901
+ self,
902
+ input_ids: Optional[torch.LongTensor] = None,
903
+ visual_feats: Optional[torch.FloatTensor] = None,
904
+ visual_pos: Optional[torch.FloatTensor] = None,
905
+ attention_mask: Optional[torch.FloatTensor] = None,
906
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
907
+ token_type_ids: Optional[torch.LongTensor] = None,
908
+ inputs_embeds: Optional[torch.FloatTensor] = None,
909
+ output_attentions: Optional[bool] = None,
910
+ output_hidden_states: Optional[bool] = None,
911
+ return_dict: Optional[bool] = None,
912
+ ) -> Union[LxmertModelOutput, Tuple[torch.FloatTensor]]:
913
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
914
+ output_hidden_states = (
915
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
916
+ )
917
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
918
+
919
+ if input_ids is not None and inputs_embeds is not None:
920
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
921
+ elif input_ids is not None:
922
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
923
+ input_shape = input_ids.size()
924
+ elif inputs_embeds is not None:
925
+ input_shape = inputs_embeds.size()[:-1]
926
+ else:
927
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
928
+
929
+ if visual_feats is None:
930
+ raise ValueError("`visual_feats` cannot be `None`")
931
+ if visual_pos is None:
932
+ raise ValueError("`visual_pos` cannot be `None`")
933
+
934
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
935
+
936
+ if attention_mask is None:
937
+ attention_mask = torch.ones(input_shape, device=device)
938
+ if token_type_ids is None:
939
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
940
+
941
+ # We create a 3D attention mask from a 2D tensor mask.
942
+ # Sizes are [batch_size, 1, 1, to_seq_length]
943
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
944
+ # this attention mask is more simple than the triangular masking of causal attention
945
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
946
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
947
+
948
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
949
+ # masked positions, this operation will create a tensor which is 0.0 for
950
+ # positions we want to attend and the dtype's smallest value for masked positions.
951
+ # Since we are adding it to the raw scores before the softmax, this is
952
+ # effectively the same as removing these entirely.
953
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
954
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
955
+
956
+ # Process the visual attention mask
957
+ if visual_attention_mask is not None:
958
+ extended_visual_attention_mask = visual_attention_mask.unsqueeze(1).unsqueeze(2)
959
+ extended_visual_attention_mask = extended_visual_attention_mask.to(dtype=self.dtype)
960
+ extended_visual_attention_mask = (1.0 - extended_visual_attention_mask) * torch.finfo(self.dtype).min
961
+ else:
962
+ extended_visual_attention_mask = None
963
+
964
+ # Positional Word Embeddings
965
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds)
966
+
967
+ # Run Lxmert encoder
968
+ encoder_outputs = self.encoder(
969
+ embedding_output,
970
+ extended_attention_mask,
971
+ visual_feats=visual_feats,
972
+ visual_pos=visual_pos,
973
+ visual_attention_mask=extended_visual_attention_mask,
974
+ output_attentions=output_attentions,
975
+ )
976
+
977
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
978
+ vision_hidden_states = visual_encoder_outputs[0]
979
+ language_hidden_states = lang_encoder_outputs[0]
980
+
981
+ all_attentions = ()
982
+ if output_attentions:
983
+ language_attentions = lang_encoder_outputs[1]
984
+ vision_attentions = visual_encoder_outputs[1]
985
+ cross_encoder_attentions = encoder_outputs[2]
986
+ all_attentions = (
987
+ language_attentions,
988
+ vision_attentions,
989
+ cross_encoder_attentions,
990
+ )
991
+
992
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
993
+
994
+ visual_output = vision_hidden_states[-1]
995
+ lang_output = language_hidden_states[-1]
996
+ pooled_output = self.pooler(lang_output)
997
+
998
+ if not return_dict:
999
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
1000
+
1001
+ return LxmertModelOutput(
1002
+ pooled_output=pooled_output,
1003
+ language_output=lang_output,
1004
+ vision_output=visual_output,
1005
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
1006
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
1007
+ language_attentions=language_attentions if output_attentions else None,
1008
+ vision_attentions=vision_attentions if output_attentions else None,
1009
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
1010
+ )
1011
+
1012
+
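+ # Note on the return path above: `LxmertModel` yields (language_output, vision_output, pooled_output) followed by
+ # the optional hidden states and attentions, matching the `LxmertModelOutput` field order; the heads below rely on
+ # this ordering when they index `lxmert_output[0]`..`lxmert_output[2]`.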
1013
+ @add_start_docstrings(
1014
+ """Lxmert Model with a specified pretraining head on top.""",
1015
+ LXMERT_START_DOCSTRING,
1016
+ )
1017
+ class LxmertForPreTraining(LxmertPreTrainedModel):
1018
+ _tied_weights_keys = ["cls.predictions.decoder.weight"]
1019
+
1020
+ def __init__(self, config):
1021
+ super().__init__(config)
1022
+ # Configuration
1023
+ self.config = config
1024
+ self.num_qa_labels = config.num_qa_labels
1025
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1026
+
1027
+ # Use of pretraining tasks
1028
+ self.task_mask_lm = config.task_mask_lm
1029
+ self.task_obj_predict = config.task_obj_predict
1030
+ self.task_matched = config.task_matched
1031
+ self.task_qa = config.task_qa
1032
+
1033
+ # Lxmert backbone
1034
+ self.lxmert = LxmertModel(config)
1035
+
1036
+ # Pre-training heads
1037
+ self.cls = LxmertPreTrainingHeads(config, self.lxmert.embeddings.word_embeddings.weight)
1038
+ if self.task_obj_predict:
1039
+ self.obj_predict_head = LxmertVisualObjHead(config)
1040
+ if self.task_qa:
1041
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1042
+
1043
+ # Weight initialization
1044
+ # Initialize weights and apply final processing
1045
+ self.post_init()
1046
+
1047
+ # Loss functions
1048
+ self.loss_fcts = {
1049
+ "l2": SmoothL1Loss(reduction="none"),
1050
+ "visual_ce": CrossEntropyLoss(reduction="none"),
1051
+ "ce": CrossEntropyLoss(),
1052
+ }
1053
+
1054
+ visual_losses = {}
1055
+ if config.visual_obj_loss:
1056
+ visual_losses["obj"] = {
1057
+ "shape": (-1,),
1058
+ "num": config.num_object_labels,
1059
+ "loss": "visual_ce",
1060
+ }
1061
+ if config.visual_attr_loss:
1062
+ visual_losses["attr"] = {
1063
+ "shape": (-1,),
1064
+ "num": config.num_attr_labels,
1065
+ "loss": "visual_ce",
1066
+ }
1067
+ if config.visual_feat_loss:
1068
+ visual_losses["feat"] = {
1069
+ "shape": (-1, config.visual_feat_dim),
1070
+ "num": config.visual_feat_dim,
1071
+ "loss": "l2",
1072
+ }
1073
+ self.visual_losses = visual_losses
1074
+
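+ # Each entry in `self.visual_losses` is applied per region in `forward`: the per-region loss is weighted by the
+ # confidence mask supplied in `obj_labels`, averaged, and scaled by `config.visual_loss_normalizer` before being
+ # added to the total pre-training loss.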
1075
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
1076
+ # Adding the following steps to resize bias to match the shape of resized embeddings
1077
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
1078
+ self.cls.predictions.bias = self._resize_bias(self.cls.predictions.bias, new_num_tokens)
1079
+ return new_embeddings
1080
+
1081
+ def _resize_bias(self, bias, new_num_tokens: int):
1082
+ old_num_tokens = bias.shape[0]
1083
+ if new_num_tokens <= old_num_tokens:
1084
+ new_bias = bias[:new_num_tokens]
1085
+ else:
1086
+ extra_bias = torch.zeros(new_num_tokens - old_num_tokens, device=bias.device)
1087
+ new_bias = torch.cat([bias, extra_bias])
1088
+ new_bias = nn.Parameter(new_bias)
1089
+ return new_bias
1090
+
1091
+ def resize_num_qa_labels(self, num_labels):
1092
+ """
1093
+ Build a resized question answering linear layer module from the model's current QA linear layer. Increasing the size
1094
+ will add newly initialized weights. Reducing the size will remove weights from the end.
1095
+
1096
+ Args:
1097
+ num_labels (`int`, *optional*):
1098
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1099
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1100
+ returns a pointer to the QA labels `torch.nn.Linear` module of the model without doing anything.
1101
+
1102
+ Return:
1103
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1104
+ """
1105
+
1106
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1107
+ if num_labels is None or cur_qa_logit_layer is None:
1108
+ return
1109
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1110
+ self.config.num_qa_labels = num_labels
1111
+ self.num_qa_labels = num_labels
1112
+
1113
+ return new_qa_logit_layer
1114
+
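+ # Illustrative use (hypothetical names): a fine-tuning script could call
+ # `model.resize_num_qa_labels(len(answer_vocab))` before training so the answer head matches a new label set.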
1115
+ def _resize_qa_labels(self, num_labels):
1116
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1117
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1118
+ self._set_qa_logit_layer(new_qa_logit_layer)
1119
+ return self.get_qa_logit_layer()
1120
+
1121
+ def get_qa_logit_layer(self) -> nn.Module:
1122
+ """
1123
+ Returns the linear layer that produces question answering logits.
1124
+
1125
+ Returns:
1126
+ `nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
1127
+ does not have a visual answering head.
1128
+ """
1129
+ if hasattr(self, "answer_head"):
1130
+ return self.answer_head.logit_fc[-1]
1131
+
1132
+ def _set_qa_logit_layer(self, qa_logit_layer):
1133
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1134
+
1135
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1136
+ if num_labels is None:
1137
+ return cur_qa_logit_layer
1138
+
1139
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1140
+ if cur_qa_labels == num_labels:
1141
+ return cur_qa_logit_layer
1142
+
1143
+ # Build new linear output
1144
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1145
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1146
+ else:
1147
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1148
+
1149
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1150
+
1151
+ # initialize all new labels
1152
+ self._init_weights(new_qa_logit_layer)
1153
+
1154
+ # Copy labels from the previous weights
1155
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1156
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1157
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1158
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1159
+
1160
+ return new_qa_logit_layer
1161
+
1162
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1163
+ @replace_return_docstrings(output_type=LxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1164
+ def forward(
1165
+ self,
1166
+ input_ids: Optional[torch.LongTensor] = None,
1167
+ visual_feats: Optional[torch.FloatTensor] = None,
1168
+ visual_pos: Optional[torch.FloatTensor] = None,
1169
+ attention_mask: Optional[torch.FloatTensor] = None,
1170
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1171
+ token_type_ids: Optional[torch.LongTensor] = None,
1172
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1173
+ labels: Optional[torch.LongTensor] = None,
1174
+ obj_labels: Optional[Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
1175
+ matched_label: Optional[torch.LongTensor] = None,
1176
+ ans: Optional[torch.Tensor] = None,
1177
+ output_attentions: Optional[bool] = None,
1178
+ output_hidden_states: Optional[bool] = None,
1179
+ return_dict: Optional[bool] = None,
1180
+ **kwargs,
1181
+ ) -> Union[LxmertForPreTrainingOutput, Tuple[torch.FloatTensor]]:
1182
+ r"""
1183
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1184
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1185
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
1186
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1187
+ obj_labels (`Dict[str, Tuple[torch.FloatTensor, torch.FloatTensor]]`, *optional*):
1188
+ Each key is named after one of the visual losses, and the two elements of the tuple have shape
1189
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)`, holding the label ids and
1190
+ the label scores, respectively.
1191
+ matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1192
+ Labels for computing whether or not the text input matches the image (classification loss). Input
1193
+ should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`:
1194
+
1195
+ - 0 indicates that the sentence does not match the image,
1196
+ - 1 indicates that the sentence does match the image.
1197
+ ans (`torch.Tensor` of shape `(batch_size,)`, *optional*):
1198
+ A one-hot representation of the correct answer.
1199
+
1200
+ Returns:
1201
+ """
1202
+
1203
+ if "masked_lm_labels" in kwargs:
1204
+ warnings.warn(
1205
+ "The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
1206
+ " instead.",
1207
+ FutureWarning,
1208
+ )
1209
+ labels = kwargs.pop("masked_lm_labels")
1210
+
1211
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1212
+
1213
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1214
+ lxmert_output = self.lxmert(
1215
+ input_ids=input_ids,
1216
+ visual_feats=visual_feats,
1217
+ visual_pos=visual_pos,
1218
+ token_type_ids=token_type_ids,
1219
+ attention_mask=attention_mask,
1220
+ visual_attention_mask=visual_attention_mask,
1221
+ inputs_embeds=inputs_embeds,
1222
+ output_hidden_states=output_hidden_states,
1223
+ output_attentions=output_attentions,
1224
+ return_dict=return_dict,
1225
+ )
1226
+
1227
+ lang_output, visual_output, pooled_output = (
1228
+ lxmert_output[0],
1229
+ lxmert_output[1],
1230
+ lxmert_output[2],
1231
+ )
1232
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1233
+ if self.task_qa:
1234
+ answer_score = self.answer_head(pooled_output)
1235
+ else:
1236
+ answer_score = pooled_output[0][0]
1237
+
1238
+ total_loss = (
1239
+ None
1240
+ if (labels is None and matched_label is None and obj_labels is None and ans is None)
1241
+ else torch.tensor(0.0, device=device)
1242
+ )
1243
+ if labels is not None and self.task_mask_lm:
1244
+ masked_lm_loss = self.loss_fcts["ce"](
1245
+ lang_prediction_scores.view(-1, self.config.vocab_size),
1246
+ labels.view(-1),
1247
+ )
1248
+ total_loss += masked_lm_loss
1249
+ if matched_label is not None and self.task_matched:
1250
+ matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
1251
+ total_loss += matched_loss
1252
+ if obj_labels is not None and self.task_obj_predict:
1253
+ total_visual_loss = torch.tensor(0.0, device=input_ids.device)
1254
+ visual_prediction_scores_dict = self.obj_predict_head(visual_output)
1255
+ for key, key_info in self.visual_losses.items():
1256
+ label, mask_conf = obj_labels[key]
1257
+ output_dim = key_info["num"]
1258
+ loss_fct_name = key_info["loss"]
1259
+ label_shape = key_info["shape"]
1260
+ weight = self.visual_loss_normalizer
1261
+ visual_loss_fct = self.loss_fcts[loss_fct_name]
1262
+ visual_prediction_scores = visual_prediction_scores_dict[key]
1263
+ visual_loss = visual_loss_fct(
1264
+ visual_prediction_scores.view(-1, output_dim),
1265
+ label.view(label_shape),
1266
+ )
1267
+ if visual_loss.dim() > 1: # Regression Losses
1268
+ visual_loss = visual_loss.mean(1)
1269
+ visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
1270
+ total_visual_loss += visual_loss
1271
+ total_loss += total_visual_loss
1272
+ if ans is not None and self.task_qa:
1273
+ answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
1274
+ total_loss += answer_loss
1275
+
1276
+ if not return_dict:
1277
+ output = (
1278
+ lang_prediction_scores,
1279
+ cross_relationship_score,
1280
+ answer_score,
1281
+ ) + lxmert_output[3:]
1282
+ return ((total_loss,) + output) if total_loss is not None else output
1283
+
1284
+ return LxmertForPreTrainingOutput(
1285
+ loss=total_loss,
1286
+ prediction_logits=lang_prediction_scores,
1287
+ cross_relationship_score=cross_relationship_score,
1288
+ question_answering_score=answer_score,
1289
+ language_hidden_states=lxmert_output.language_hidden_states,
1290
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1291
+ language_attentions=lxmert_output.language_attentions,
1292
+ vision_attentions=lxmert_output.vision_attentions,
1293
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1294
+ )
1295
+
1296
+
1297
+ @add_start_docstrings(
1298
+ """Lxmert Model with a visual-answering head on top for downstream QA tasks""",
1299
+ LXMERT_START_DOCSTRING,
1300
+ )
1301
+ class LxmertForQuestionAnswering(LxmertPreTrainedModel):
1302
+ def __init__(self, config):
1303
+ super().__init__(config)
1304
+ # Configuration
1305
+ self.config = config
1306
+ self.num_qa_labels = config.num_qa_labels
1307
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1308
+
1309
+ # Lxmert backbone
1310
+ self.lxmert = LxmertModel(config)
1311
+
1312
+ self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
1313
+
1314
+ # Weight initialization
1315
+ # Initialize weights and apply final processing
1316
+ self.post_init()
1317
+
1318
+ # Loss function
1319
+ self.loss = CrossEntropyLoss()
1320
+
1321
+ def resize_num_qa_labels(self, num_labels):
1322
+ """
1323
+ Build a resized question answering linear layer module from the model's current QA linear layer. Increasing the size
1324
+ will add newly initialized weights. Reducing the size will remove weights from the end.
1325
+
1326
+ Args:
1327
+ num_labels (`int`, *optional*):
1328
+ New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
1329
+ weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
1330
+ returns a pointer to the QA labels `torch.nn.Linear` module of the model without doing anything.
1331
+
1332
+ Return:
1333
+ `torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
1334
+ """
1335
+
1336
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1337
+ if num_labels is None or cur_qa_logit_layer is None:
1338
+ return
1339
+ new_qa_logit_layer = self._resize_qa_labels(num_labels)
1340
+ self.config.num_qa_labels = num_labels
1341
+ self.num_qa_labels = num_labels
1342
+
1343
+ return new_qa_logit_layer
1344
+
1345
+ def _resize_qa_labels(self, num_labels):
1346
+ cur_qa_logit_layer = self.get_qa_logit_layer()
1347
+ new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
1348
+ self._set_qa_logit_layer(new_qa_logit_layer)
1349
+ return self.get_qa_logit_layer()
1350
+
1351
+ def get_qa_logit_layer(self) -> nn.Module:
1352
+ """
1353
+ Returns the linear layer that produces question answering logits
1354
+
1355
+ Returns:
1356
+ `nn.Module`: The torch module that maps the question answering prediction hidden states to logits, or
1357
+ `None` if Lxmert does not have a visual answering head.
1358
+ """
1359
+
1360
+ if hasattr(self, "answer_head"):
1361
+ return self.answer_head.logit_fc[-1]
1362
+
1363
+ def _set_qa_logit_layer(self, qa_logit_layer):
1364
+ self.answer_head.logit_fc[-1] = qa_logit_layer
1365
+
1366
+ def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
1367
+ if num_labels is None:
1368
+ return cur_qa_logit_layer
1369
+
1370
+ cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
1371
+ if cur_qa_labels == num_labels:
1372
+ return cur_qa_logit_layer
1373
+
1374
+ # Build new linear output
1375
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1376
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
1377
+ else:
1378
+ new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
1379
+
1380
+ new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
1381
+
1382
+ # initialize all new labels
1383
+ self._init_weights(new_qa_logit_layer)
1384
+
1385
+ # Copy labels from the previous weights
1386
+ num_labels_to_copy = min(cur_qa_labels, num_labels)
1387
+ new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
1388
+ if getattr(cur_qa_logit_layer, "bias", None) is not None:
1389
+ new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
1390
+
1391
+ return new_qa_logit_layer
1392
+
1393
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1394
+ @add_code_sample_docstrings(
1395
+ checkpoint=_CHECKPOINT_FOR_DOC,
1396
+ output_type=LxmertForQuestionAnsweringOutput,
1397
+ config_class=_CONFIG_FOR_DOC,
1398
+ )
1399
+ def forward(
1400
+ self,
1401
+ input_ids: Optional[torch.LongTensor] = None,
1402
+ visual_feats: Optional[torch.FloatTensor] = None,
1403
+ visual_pos: Optional[torch.FloatTensor] = None,
1404
+ attention_mask: Optional[torch.FloatTensor] = None,
1405
+ visual_attention_mask: Optional[torch.FloatTensor] = None,
1406
+ token_type_ids: Optional[torch.LongTensor] = None,
1407
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1408
+ labels: Optional[torch.Tensor] = None,
1409
+ output_attentions: Optional[bool] = None,
1410
+ output_hidden_states: Optional[bool] = None,
1411
+ return_dict: Optional[bool] = None,
1412
+ ) -> Union[LxmertForQuestionAnsweringOutput, Tuple[torch.FloatTensor]]:
1413
+ r"""
1414
+ labels (`torch.Tensor` of shape `(batch_size,)`, *optional*):
1415
+ A one-hot representation of the correct answer.
1416
+ """
1417
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1418
+
1419
+ lxmert_output = self.lxmert(
1420
+ input_ids=input_ids,
1421
+ visual_feats=visual_feats,
1422
+ visual_pos=visual_pos,
1423
+ token_type_ids=token_type_ids,
1424
+ attention_mask=attention_mask,
1425
+ visual_attention_mask=visual_attention_mask,
1426
+ inputs_embeds=inputs_embeds,
1427
+ output_hidden_states=output_hidden_states,
1428
+ output_attentions=output_attentions,
1429
+ return_dict=return_dict,
1430
+ )
1431
+
1432
+ pooled_output = lxmert_output[2]
1433
+ answer_score = self.answer_head(pooled_output)
1434
+ loss = None
1435
+ if labels is not None:
1436
+ loss = self.loss(answer_score.view(-1, self.num_qa_labels), labels.view(-1))
1437
+
1438
+ if not return_dict:
1439
+ output = (answer_score,) + lxmert_output[3:]
1440
+ return (loss,) + output if loss is not None else output
1441
+
1442
+ return LxmertForQuestionAnsweringOutput(
1443
+ loss=loss,
1444
+ question_answering_score=answer_score,
1445
+ language_hidden_states=lxmert_output.language_hidden_states,
1446
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1447
+ language_attentions=lxmert_output.language_attentions,
1448
+ vision_attentions=lxmert_output.vision_attentions,
1449
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1450
+ )
1451
+
1452
+
1453
+ __all__ = [
1454
+ "LxmertEncoder",
1455
+ "LxmertForPreTraining",
1456
+ "LxmertForQuestionAnswering",
1457
+ "LxmertModel",
1458
+ "LxmertPreTrainedModel",
1459
+ "LxmertVisualFeatureEncoder",
1460
+ "LxmertXLayer",
1461
+ ]
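Before the TensorFlow port below, here is a minimal end-to-end sketch of the question-answering head defined above. It is a hedged example, not the canonical recipe: the `unc-nlp/lxmert-base-uncased` checkpoint is the one named in this upload's docstrings, the 36-region batch and random tensors merely stand in for real Faster R-CNN features, and the feature/position dimensions are read from the model config rather than hard-coded.

import torch
from transformers import AutoTokenizer, LxmertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("What is the person holding?", return_tensors="pt")
num_regions = 36  # placeholder; a real pipeline takes this from the detector output
visual_feats = torch.randn(1, num_regions, model.config.visual_feat_dim)  # stand-in ROI features
visual_pos = torch.rand(1, num_regions, model.config.visual_pos_dim)      # stand-in normalized boxes

outputs = model(**inputs, visual_feats=visual_feats, visual_pos=visual_pos)
predicted_answer_id = outputs.question_answering_score.argmax(-1)

The same calling convention (text keyword arguments plus `visual_feats`/`visual_pos`) applies to `LxmertModel` and `LxmertForPreTraining`.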
janus/lib/python3.10/site-packages/transformers/models/lxmert/modeling_tf_lxmert.py ADDED
@@ -0,0 +1,1661 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the
3
+ # Lxmert Authors.
4
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ """TF 2.0 LXMERT model."""
18
+
19
+ from __future__ import annotations
20
+
21
+ import warnings
22
+ from dataclasses import dataclass
23
+ from typing import Dict, Optional, Tuple, Union
24
+
25
+ import numpy as np
26
+ import tensorflow as tf
27
+
28
+ from ...activations_tf import get_tf_activation
29
+ from ...modeling_tf_utils import (
30
+ TFModelInputType,
31
+ TFPreTrainedModel,
32
+ get_initializer,
33
+ keras,
34
+ keras_serializable,
35
+ shape_list,
36
+ unpack_inputs,
37
+ )
38
+ from ...tf_utils import check_embeddings_within_bounds, stable_softmax
39
+ from ...utils import (
40
+ ModelOutput,
41
+ add_code_sample_docstrings,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_lxmert import LxmertConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased"
53
+ _CONFIG_FOR_DOC = "LxmertConfig"
54
+
55
+
56
+ @dataclass
57
+ class TFLxmertModelOutput(ModelOutput):
58
+ """
59
+ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,
60
+ visual, and cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship"
61
+ encoder)
62
+
63
+
64
+ Args:
65
+ language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
66
+ Sequence of hidden-states at the output of the last layer of the language encoder.
67
+ vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
68
+ Sequence of hidden-states at the output of the last layer of the visual encoder.
69
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
70
+ Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed
71
+ by a Linear layer and a Tanh activation function.
72
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
73
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
74
+ `(batch_size, sequence_length, hidden_size)`.
75
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
76
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
77
+ `(batch_size, sequence_length, hidden_size)`.
78
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
79
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
80
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
81
+ the self-attention heads.
82
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
83
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
84
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
85
+ the self-attention heads.
86
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
87
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
88
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
89
+ the self-attention heads.
90
+ """
91
+
92
+ language_output: tf.Tensor | None = None
93
+ vision_output: tf.Tensor | None = None
94
+ pooled_output: tf.Tensor | None = None
95
+ language_hidden_states: Tuple[tf.Tensor] | None = None
96
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
97
+ language_attentions: Tuple[tf.Tensor] | None = None
98
+ vision_attentions: Tuple[tf.Tensor] | None = None
99
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
100
+
101
+
102
+ @dataclass
103
+ class TFLxmertForPreTrainingOutput(ModelOutput):
104
+ """
105
+ Output type of [`LxmertForPreTraining`].
106
+
107
+ Args:
108
+ loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):
109
+ Total loss as the sum of the masked language modeling loss and the next sequence prediction
110
+ (classification) loss.
111
+ prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
112
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
113
+ cross_relationship_score (`tf.Tensor` of shape `(batch_size, 2)`):
114
+ Prediction scores of the textual matching objective (classification) head (scores of True/False
115
+ continuation before SoftMax).
116
+ question_answering_score (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):
117
+ Prediction scores of question answering objective (classification).
118
+ language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
119
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
120
+ `(batch_size, sequence_length, hidden_size)`.
121
+ vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
122
+ Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape
123
+ `(batch_size, sequence_length, hidden_size)`.
124
+ language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
125
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
126
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
127
+ the self-attention heads.
128
+ vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
129
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
130
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
131
+ the self-attention heads.
132
+ cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
133
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
134
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
135
+ the self-attention heads.
136
+
137
+ """
138
+
139
+ loss: tf.Tensor | None = None
140
+ prediction_logits: tf.Tensor | None = None
141
+ cross_relationship_score: tf.Tensor | None = None
142
+ question_answering_score: tf.Tensor | None = None
143
+ language_hidden_states: Tuple[tf.Tensor] | None = None
144
+ vision_hidden_states: Tuple[tf.Tensor] | None = None
145
+ language_attentions: Tuple[tf.Tensor] | None = None
146
+ vision_attentions: Tuple[tf.Tensor] | None = None
147
+ cross_encoder_attentions: Tuple[tf.Tensor] | None = None
148
+
149
+
150
+ class TFLxmertVisualFeatureEncoder(keras.layers.Layer):
151
+ def __init__(self, config, **kwargs):
152
+ super().__init__(**kwargs)
153
+
154
+ # Object feature encoding
155
+ self.visn_fc = keras.layers.Dense(
156
+ config.hidden_size,
157
+ kernel_initializer=get_initializer(config.initializer_range),
158
+ name="visn_fc",
159
+ )
160
+ self.visn_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="visn_layer_norm")
161
+
162
+ # Box position encoding
163
+ self.box_fc = keras.layers.Dense(
164
+ config.hidden_size,
165
+ kernel_initializer=get_initializer(config.initializer_range),
166
+ name="box_fc",
167
+ )
168
+ self.box_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm")
169
+
170
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
171
+ self.feat_dim = config.visual_feat_dim
172
+ self.pos_dim = config.visual_pos_dim
173
+ self.config = config
174
+
175
+ def call(self, visn_input, training=False):
176
+ feats, boxes = visn_input
177
+
178
+ x = self.visn_fc(feats)
179
+ x = self.visn_layer_norm(x)
180
+ y = self.box_fc(boxes)
181
+ y = self.box_layer_norm(y)
182
+ output = (x + y) / 2
183
+
184
+ output = self.dropout(output, training=training)
185
+ return output
186
+
187
+ def build(self, input_shape=None):
188
+ if self.built:
189
+ return
190
+ self.built = True
191
+ if getattr(self, "visn_fc", None) is not None:
192
+ with tf.name_scope(self.visn_fc.name):
193
+ self.visn_fc.build([None, None, self.feat_dim])
194
+ if getattr(self, "visn_layer_norm", None) is not None:
195
+ with tf.name_scope(self.visn_layer_norm.name):
196
+ self.visn_layer_norm.build([None, None, self.config.hidden_size])
197
+ if getattr(self, "box_fc", None) is not None:
198
+ with tf.name_scope(self.box_fc.name):
199
+ self.box_fc.build([None, None, self.pos_dim])
200
+ if getattr(self, "box_layer_norm", None) is not None:
201
+ with tf.name_scope(self.box_layer_norm.name):
202
+ self.box_layer_norm.build([None, None, self.config.hidden_size])
203
+
204
+
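+ # Note: unlike the PyTorch `LxmertVisualFeatureEncoder.forward(visual_feats, visual_pos)` earlier in this diff,
+ # this TF layer is called with a single tuple, e.g. `embeddings = visual_encoder((feats, boxes))` (names illustrative).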
205
+ class TFLxmertEmbeddings(keras.layers.Layer):
206
+ """Construct the embeddings from word, position and token_type embeddings."""
207
+
208
+ def __init__(self, config, **kwargs):
209
+ super().__init__(**kwargs)
210
+
211
+ self.config = config
212
+ self.hidden_size = config.hidden_size
213
+ self.max_position_embeddings = config.max_position_embeddings
214
+ self.initializer_range = config.initializer_range
215
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
216
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
217
+
218
+ def build(self, input_shape=None):
219
+ with tf.name_scope("word_embeddings"):
220
+ self.weight = self.add_weight(
221
+ name="weight",
222
+ shape=[self.config.vocab_size, self.hidden_size],
223
+ initializer=get_initializer(initializer_range=self.initializer_range),
224
+ )
225
+
226
+ with tf.name_scope("token_type_embeddings"):
227
+ self.token_type_embeddings = self.add_weight(
228
+ name="embeddings",
229
+ shape=[self.config.type_vocab_size, self.hidden_size],
230
+ initializer=get_initializer(initializer_range=self.initializer_range),
231
+ )
232
+
233
+ with tf.name_scope("position_embeddings"):
234
+ self.position_embeddings = self.add_weight(
235
+ name="embeddings",
236
+ shape=[self.max_position_embeddings, self.hidden_size],
237
+ initializer=get_initializer(initializer_range=self.initializer_range),
238
+ )
239
+
240
+ if self.built:
241
+ return
242
+ self.built = True
243
+ if getattr(self, "LayerNorm", None) is not None:
244
+ with tf.name_scope(self.LayerNorm.name):
245
+ self.LayerNorm.build([None, None, self.config.hidden_size])
246
+
247
+ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
248
+ """
249
+ Applies embedding based on inputs tensor.
250
+
251
+ Returns:
252
+ final_embeddings (`tf.Tensor`): output embedding tensor.
253
+ """
254
+ assert not (input_ids is None and inputs_embeds is None)
255
+
256
+ if input_ids is not None:
257
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
258
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
259
+
260
+ input_shape = shape_list(inputs_embeds)[:-1]
261
+
262
+ if token_type_ids is None:
263
+ token_type_ids = tf.fill(dims=input_shape, value=0)
264
+
265
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
266
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
267
+ token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
268
+ final_embeddings = inputs_embeds + position_embeds + token_type_embeds
269
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
270
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
271
+
272
+ return final_embeddings
273
+
274
+
275
+ class TFLxmertAttention(keras.layers.Layer):
276
+ def __init__(self, config, **kwargs):
277
+ super().__init__(**kwargs)
278
+ if config.hidden_size % config.num_attention_heads != 0:
279
+ raise ValueError(
280
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
281
+ f"heads ({config.num_attention_heads})"
282
+ )
283
+
284
+ self.num_attention_heads = config.num_attention_heads
285
+ assert config.hidden_size % config.num_attention_heads == 0
286
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
287
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
288
+
289
+ self.query = keras.layers.Dense(
290
+ self.all_head_size,
291
+ kernel_initializer=get_initializer(config.initializer_range),
292
+ name="query",
293
+ )
294
+ self.key = keras.layers.Dense(
295
+ self.all_head_size,
296
+ kernel_initializer=get_initializer(config.initializer_range),
297
+ name="key",
298
+ )
299
+ self.value = keras.layers.Dense(
300
+ self.all_head_size,
301
+ kernel_initializer=get_initializer(config.initializer_range),
302
+ name="value",
303
+ )
304
+
305
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
306
+ self.ctx_dim = config.hidden_size
307
+ self.config = config
308
+
309
+ def transpose_for_scores(self, x, batch_size):
310
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
311
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
312
+ return tf.transpose(x, perm=[0, 2, 1, 3])
313
+
314
+ def call(self, hidden_states, context, attention_mask, output_attentions, training=False):
315
+ batch_size = shape_list(hidden_states)[0]
316
+ mixed_query_layer = self.query(hidden_states)
317
+ mixed_key_layer = self.key(context)
318
+ mixed_value_layer = self.value(context)
319
+
320
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
321
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
322
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
323
+
324
+ # Take the dot product between "query" and "key" to get the raw attention scores.
325
+ attention_scores = tf.matmul(
326
+ query_layer, key_layer, transpose_b=True
327
+ ) # (batch size, num_heads, seq_len_q, seq_len_k)
328
+ dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
329
+ attention_scores = attention_scores / tf.math.sqrt(dk)
330
+
331
+ if attention_mask is not None:
332
+ # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function)
333
+ attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
334
+ attention_scores = attention_scores + attention_mask
335
+
336
+ # Normalize the attention scores to probabilities.
337
+ attention_probs = stable_softmax(attention_scores, axis=-1)
338
+
339
+ # This is actually dropping out entire tokens to attend to, which might
340
+ # seem a bit unusual, but is taken from the original Transformer paper.
341
+ attention_probs = self.dropout(attention_probs, training=training)
342
+ context_layer = tf.matmul(attention_probs, value_layer)
343
+
344
+ context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
345
+ context_layer = tf.reshape(
346
+ context_layer, (batch_size, -1, self.all_head_size)
347
+ ) # (batch_size, seq_len_q, all_head_size)
348
+
349
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
350
+ return outputs
351
+
352
+ def build(self, input_shape=None):
353
+ if self.built:
354
+ return
355
+ self.built = True
356
+ if getattr(self, "query", None) is not None:
357
+ with tf.name_scope(self.query.name):
358
+ self.query.build([None, None, self.config.hidden_size])
359
+ if getattr(self, "key", None) is not None:
360
+ with tf.name_scope(self.key.name):
361
+ self.key.build([None, None, self.ctx_dim])
362
+ if getattr(self, "value", None) is not None:
363
+ with tf.name_scope(self.value.name):
364
+ self.value.build([None, None, self.ctx_dim])
365
+
366
+
367
+ class TFLxmertIntermediate(keras.layers.Layer):
368
+ def __init__(self, config, **kwargs):
369
+ super().__init__(**kwargs)
370
+ self.dense = keras.layers.Dense(
371
+ config.intermediate_size,
372
+ kernel_initializer=get_initializer(config.initializer_range),
373
+ name="dense",
374
+ )
375
+ if isinstance(config.hidden_act, str):
376
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
377
+ else:
378
+ self.intermediate_act_fn = config.hidden_act
379
+ self.config = config
380
+
381
+ def call(self, hidden_states):
382
+ hidden_states = self.dense(hidden_states)
383
+ hidden_states = self.intermediate_act_fn(hidden_states)
384
+ return hidden_states
385
+
386
+ def build(self, input_shape=None):
387
+ if self.built:
388
+ return
389
+ self.built = True
390
+ if getattr(self, "dense", None) is not None:
391
+ with tf.name_scope(self.dense.name):
392
+ self.dense.build([None, None, self.config.hidden_size])
393
+
394
+
395
+ class TFLxmertOutput(keras.layers.Layer):
396
+ def __init__(self, config, **kwargs):
397
+ super().__init__(**kwargs)
398
+ self.dense = keras.layers.Dense(
399
+ config.hidden_size,
400
+ kernel_initializer=get_initializer(config.initializer_range),
401
+ name="dense",
402
+ )
403
+
404
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
405
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
406
+ self.config = config
407
+
408
+ def call(self, hidden_states, input_tensor, training=False):
409
+ hidden_states = self.dense(hidden_states)
410
+ hidden_states = self.dropout(hidden_states, training)
411
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
412
+ return hidden_states
413
+
414
+ def build(self, input_shape=None):
415
+ if self.built:
416
+ return
417
+ self.built = True
418
+ if getattr(self, "dense", None) is not None:
419
+ with tf.name_scope(self.dense.name):
420
+ self.dense.build([None, None, self.config.intermediate_size])
421
+ if getattr(self, "LayerNorm", None) is not None:
422
+ with tf.name_scope(self.LayerNorm.name):
423
+ self.LayerNorm.build([None, None, self.config.hidden_size])
424
+
425
+
426
+ class TFLxmertAttentionOutput(keras.layers.Layer):
427
+ def __init__(self, config, **kwargs):
428
+ super().__init__(**kwargs)
429
+ self.dense = keras.layers.Dense(
430
+ config.hidden_size,
431
+ kernel_initializer=get_initializer(config.initializer_range),
432
+ name="dense",
433
+ )
434
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
435
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
436
+ self.config = config
437
+
438
+ def call(self, hidden_states, input_tensor, training=False):
439
+ hidden_states = self.dense(hidden_states)
440
+ hidden_states = self.dropout(hidden_states, training=training)
441
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
442
+ return hidden_states
443
+
444
+ def build(self, input_shape=None):
445
+ if self.built:
446
+ return
447
+ self.built = True
448
+ if getattr(self, "dense", None) is not None:
449
+ with tf.name_scope(self.dense.name):
450
+ self.dense.build([None, None, self.config.hidden_size])
451
+ if getattr(self, "LayerNorm", None) is not None:
452
+ with tf.name_scope(self.LayerNorm.name):
453
+ self.LayerNorm.build([None, None, self.config.hidden_size])
454
+
455
+
456
+ class TFLxmertSelfAttentionLayer(keras.layers.Layer):
457
+ def __init__(self, config, **kwargs):
458
+ super().__init__(**kwargs)
459
+ self.self = TFLxmertAttention(config, name="self")
460
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
461
+
462
+ def call(self, input_tensor, attention_mask, output_attentions, training=False):
463
+ # Self attention attends to itself, thus keys and queries are the same (input_tensor).
464
+ self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions)
465
+ if output_attentions:
466
+ attention_probs = self_output[1]
467
+ attention_output = self.attention_output(self_output[0], input_tensor)
468
+ return (attention_output, attention_probs) if output_attentions else (attention_output,)
469
+
470
+ def build(self, input_shape=None):
471
+ if self.built:
472
+ return
473
+ self.built = True
474
+ if getattr(self, "self", None) is not None:
475
+ with tf.name_scope(self.self.name):
476
+ self.self.build(None)
477
+ if getattr(self, "attention_output", None) is not None:
478
+ with tf.name_scope(self.attention_output.name):
479
+ self.attention_output.build(None)
480
+
481
+
482
+ class TFLxmertCrossAttentionLayer(keras.layers.Layer):
483
+ def __init__(self, config, **kwargs):
484
+ super().__init__(**kwargs)
485
+ self.att = TFLxmertAttention(config, name="att")
486
+ self.attention_output = TFLxmertAttentionOutput(config, name="output")
487
+
488
+ def call(
489
+ self,
490
+ input_tensor,
491
+ ctx_tensor,
492
+ ctx_att_mask,
493
+ output_attentions=False,
494
+ training=False,
495
+ ):
496
+ output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)
497
+ if output_attentions:
498
+ attention_probs = output[1]
499
+ attention_output = self.attention_output(output[0], input_tensor, training=training)
500
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
501
+ return outputs
502
+
503
+ def build(self, input_shape=None):
504
+ if self.built:
505
+ return
506
+ self.built = True
507
+ if getattr(self, "att", None) is not None:
508
+ with tf.name_scope(self.att.name):
509
+ self.att.build(None)
510
+ if getattr(self, "attention_output", None) is not None:
511
+ with tf.name_scope(self.attention_output.name):
512
+ self.attention_output.build(None)
513
+
514
+
515
+ class TFLxmertLayer(keras.layers.Layer):
516
+ def __init__(self, config, **kwargs):
517
+ super().__init__(**kwargs)
518
+ self.attention = TFLxmertSelfAttentionLayer(config, name="attention")
519
+ self.intermediate = TFLxmertIntermediate(config, name="intermediate")
520
+ self.transformer_output = TFLxmertOutput(config, name="output")
521
+
522
+ def call(self, hidden_states, attention_mask, output_attentions, training=False):
523
+ attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)
524
+ attention_output = attention_outputs[0]
525
+ intermediate_output = self.intermediate(attention_output)
526
+ layer_output = self.transformer_output(intermediate_output, attention_output, training=training)
527
+ outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
528
+ return outputs
529
+
530
+ def build(self, input_shape=None):
531
+ if self.built:
532
+ return
533
+ self.built = True
534
+ if getattr(self, "attention", None) is not None:
535
+ with tf.name_scope(self.attention.name):
536
+ self.attention.build(None)
537
+ if getattr(self, "intermediate", None) is not None:
538
+ with tf.name_scope(self.intermediate.name):
539
+ self.intermediate.build(None)
540
+ if getattr(self, "transformer_output", None) is not None:
541
+ with tf.name_scope(self.transformer_output.name):
542
+ self.transformer_output.build(None)
543
+
544
+
545
+ class TFLxmertXLayer(keras.layers.Layer):
546
+ def __init__(self, config, **kwargs):
547
+ super().__init__(**kwargs)
548
+ self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention")
549
+
550
+ # Self-attention Layers
551
+ self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att")
552
+ self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att")
553
+
554
+ # Intermediate and Output Layers (FFNs)
555
+ self.lang_inter = TFLxmertIntermediate(config, name="lang_inter")
556
+ self.lang_output = TFLxmertOutput(config, name="lang_output")
557
+ self.visn_inter = TFLxmertIntermediate(config, name="visn_inter")
558
+ self.visn_output = TFLxmertOutput(config, name="visn_output")
559
+
560
+ def cross_att(
561
+ self,
562
+ lang_input,
563
+ lang_attention_mask,
564
+ visn_input,
565
+ visn_attention_mask,
566
+ output_attentions,
567
+ training=False,
568
+ ):
569
+ # Cross Attention
570
+
571
+ # Keras model saving and loading *does not work* when the same input tensors are fed to two layers.
572
+ lang_attention_lang_input = tf.identity(lang_input)
573
+ visn_attention_lang_input = tf.identity(lang_input)
574
+ lang_attention_visn_input = tf.identity(visn_input)
575
+ visn_attention_visn_input = tf.identity(visn_input)
576
+
577
+ lang_att_output = self.visual_attention(
578
+ lang_attention_lang_input,
579
+ lang_attention_visn_input,
580
+ visn_attention_mask,
581
+ output_attentions=output_attentions,
582
+ training=training,
583
+ )
584
+ visn_att_output = self.visual_attention(
585
+ visn_attention_visn_input,
586
+ visn_attention_lang_input,
587
+ lang_attention_mask,
588
+ output_attentions=output_attentions,
589
+ training=training,
590
+ )
591
+ return lang_att_output, visn_att_output
592
+
593
+ def self_att(
594
+ self,
595
+ lang_input,
596
+ lang_attention_mask,
597
+ visn_input,
598
+ visn_attention_mask,
599
+ training=False,
600
+ ):
601
+ # Self Attention
602
+ output_attentions = False
603
+ lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)
604
+ visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)
605
+ return lang_att_output[0], visn_att_output[0]
606
+
607
+ def output_fc(self, lang_input, visn_input, training=False):
608
+ # FC layers
609
+ lang_inter_output = self.lang_inter(lang_input)
610
+ visn_inter_output = self.visn_inter(visn_input)
611
+
612
+ # Layer output
613
+ lang_output = self.lang_output(lang_inter_output, lang_input, training)
614
+ visn_output = self.visn_output(visn_inter_output, visn_input, training)
615
+ return lang_output, visn_output
616
+
617
+ def call(
618
+ self,
619
+ lang_feats,
620
+ lang_attention_mask,
621
+ visn_feats,
622
+ visn_attention_mask,
623
+ output_attentions,
624
+ training=False,
625
+ ):
626
+ lang_att_output = lang_feats
627
+ visn_att_output = visn_feats
628
+
629
+ lang_att_output, visn_att_output = self.cross_att(
630
+ lang_att_output,
631
+ lang_attention_mask,
632
+ visn_att_output,
633
+ visn_attention_mask,
634
+ output_attentions,
635
+ training=training,
636
+ )
637
+ attention_probs = lang_att_output[1:]
638
+ lang_att_output, visn_att_output = self.self_att(
639
+ lang_att_output[0],
640
+ lang_attention_mask,
641
+ visn_att_output[0],
642
+ visn_attention_mask,
643
+ training=training,
644
+ )
645
+ lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)
646
+
647
+ return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)
648
+
649
+ def build(self, input_shape=None):
650
+ if self.built:
651
+ return
652
+ self.built = True
653
+ if getattr(self, "visual_attention", None) is not None:
654
+ with tf.name_scope(self.visual_attention.name):
655
+ self.visual_attention.build(None)
656
+ if getattr(self, "lang_self_att", None) is not None:
657
+ with tf.name_scope(self.lang_self_att.name):
658
+ self.lang_self_att.build(None)
659
+ if getattr(self, "visn_self_att", None) is not None:
660
+ with tf.name_scope(self.visn_self_att.name):
661
+ self.visn_self_att.build(None)
662
+ if getattr(self, "lang_inter", None) is not None:
663
+ with tf.name_scope(self.lang_inter.name):
664
+ self.lang_inter.build(None)
665
+ if getattr(self, "lang_output", None) is not None:
666
+ with tf.name_scope(self.lang_output.name):
667
+ self.lang_output.build(None)
668
+ if getattr(self, "visn_inter", None) is not None:
669
+ with tf.name_scope(self.visn_inter.name):
670
+ self.visn_inter.build(None)
671
+ if getattr(self, "visn_output", None) is not None:
672
+ with tf.name_scope(self.visn_output.name):
673
+ self.visn_output.build(None)
674
+
675
+
676
+ class TFLxmertEncoder(keras.layers.Layer):
677
+ def __init__(self, config, **kwargs):
678
+ super().__init__(**kwargs)
679
+
680
+ self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc")
681
+
682
+ # Number of layers
683
+ self.num_l_layers = config.l_layers
684
+ self.num_x_layers = config.x_layers
685
+ self.num_r_layers = config.r_layers
686
+
687
+ # Layers
688
+ # Using self.layer instead of self.l_layer to support loading BERT weights.
689
+ self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)]
690
+ self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)]
691
+ self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)]
692
+ self.config = config
693
+
694
+ def call(
695
+ self,
696
+ lang_feats=None,
697
+ lang_attention_mask=None,
698
+ visual_feats=None,
699
+ visual_pos=None,
700
+ visual_attention_mask=None,
701
+ output_attentions=None,
702
+ training=False,
703
+ ):
704
+ vision_hidden_states = ()
705
+ language_hidden_states = ()
706
+ vision_attentions = () if output_attentions or self.config.output_attentions else None
707
+ language_attentions = () if output_attentions or self.config.output_attentions else None
708
+ cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None
709
+
710
+ visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)
711
+
712
+ # Run language layers
713
+ for layer_module in self.layer:
714
+ l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)
715
+ lang_feats = l_outputs[0]
716
+ language_hidden_states = language_hidden_states + (lang_feats,)
717
+ if language_attentions is not None:
718
+ language_attentions = language_attentions + (l_outputs[1],)
719
+
720
+ # Run relational layers
721
+ for layer_module in self.r_layers:
722
+ v_outputs = layer_module(
723
+ visual_feats,
724
+ visual_attention_mask,
725
+ output_attentions,
726
+ training=training,
727
+ )
728
+ visual_feats = v_outputs[0]
729
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
730
+ if vision_attentions is not None:
731
+ vision_attentions = vision_attentions + (v_outputs[1],)
732
+
733
+ # Run cross-modality layers
734
+ for layer_module in self.x_layers:
735
+ x_outputs = layer_module(
736
+ lang_feats,
737
+ lang_attention_mask,
738
+ visual_feats,
739
+ visual_attention_mask,
740
+ output_attentions,
741
+ training=training,
742
+ )
743
+ lang_feats, visual_feats = x_outputs[:2]
744
+ vision_hidden_states = vision_hidden_states + (visual_feats,)
745
+ language_hidden_states = language_hidden_states + (lang_feats,)
746
+ if cross_encoder_attentions is not None:
747
+ cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)
748
+
749
+ visual_encoder_outputs = (
750
+ vision_hidden_states,
751
+ vision_attentions if output_attentions else None,
752
+ )
753
+ lang_encoder_outputs = (
754
+ language_hidden_states,
755
+ language_attentions if output_attentions else None,
756
+ )
757
+
758
+ return (
759
+ visual_encoder_outputs,
760
+ lang_encoder_outputs,
761
+ cross_encoder_attentions if output_attentions else None,
762
+ )
763
+
764
+ def build(self, input_shape=None):
765
+ if self.built:
766
+ return
767
+ self.built = True
768
+ if getattr(self, "visn_fc", None) is not None:
769
+ with tf.name_scope(self.visn_fc.name):
770
+ self.visn_fc.build(None)
771
+ if getattr(self, "layer", None) is not None:
772
+ for layer in self.layer:
773
+ with tf.name_scope(layer.name):
774
+ layer.build(None)
775
+ if getattr(self, "x_layers", None) is not None:
776
+ for layer in self.x_layers:
777
+ with tf.name_scope(layer.name):
778
+ layer.build(None)
779
+ if getattr(self, "r_layers", None) is not None:
780
+ for layer in self.r_layers:
781
+ with tf.name_scope(layer.name):
782
+ layer.build(None)
783
+
784
+
785
+ @keras_serializable
786
+ class TFLxmertMainLayer(keras.layers.Layer):
787
+ config_class = LxmertConfig
788
+
789
+ def __init__(self, config, **kwargs):
790
+ super().__init__(**kwargs)
791
+
792
+ self.config = config
793
+ self.num_l_layers = config.l_layers
794
+ self.num_x_layers = config.x_layers
795
+ self.num_r_layers = config.r_layers
796
+ self.initializer_range = config.initializer_range
797
+ self.output_attentions = config.output_attentions
798
+ self.output_hidden_states = config.output_hidden_states
799
+ self.return_dict = config.use_return_dict
800
+ self.embeddings = TFLxmertEmbeddings(config, name="embeddings")
801
+ self.encoder = TFLxmertEncoder(config, name="encoder")
802
+ self.pooler = TFLxmertPooler(config, name="pooler")
803
+ self.config = config
804
+
805
+ def get_input_embeddings(self):
806
+ return self.embeddings
807
+
808
+ def set_input_embeddings(self, value):
809
+ self.embeddings.weight = value
810
+ self.embeddings.vocab_size = shape_list(value)[0]
811
+
812
+ def _prune_heads(self, heads_to_prune):
813
+ raise NotImplementedError
814
+
815
+ @unpack_inputs
816
+ def call(
817
+ self,
818
+ input_ids=None,
819
+ visual_feats=None,
820
+ visual_pos=None,
821
+ attention_mask=None,
822
+ visual_attention_mask=None,
823
+ token_type_ids=None,
824
+ inputs_embeds=None,
825
+ output_attentions=None,
826
+ output_hidden_states=None,
827
+ return_dict=None,
828
+ training=False,
829
+ ):
830
+ if input_ids is not None and inputs_embeds is not None:
831
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
832
+ elif input_ids is not None:
833
+ input_shape = shape_list(input_ids)
834
+ elif inputs_embeds is not None:
835
+ input_shape = shape_list(inputs_embeds)[:-1]
836
+ else:
837
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
838
+ if visual_pos is None or visual_feats is None:
839
+ raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.")
840
+
841
+ if attention_mask is None:
842
+ attention_mask = tf.fill(input_shape, 1)
843
+
844
+ if token_type_ids is None:
845
+ token_type_ids = tf.fill(input_shape, 0)
846
+
847
+ # Positional Word Embeddings
848
+ embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)
849
+
850
+ # We create a 3D attention mask from a 2D tensor mask.
851
+ # Sizes are [batch_size, 1, 1, to_seq_length]
852
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
853
+ # This attention mask is simpler than the triangular masking of causal attention
854
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
855
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
856
+
857
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
858
+ # masked positions, this operation will create a tensor which is 0.0 for
859
+ # positions we want to attend and -10000.0 for masked positions.
860
+ # Since we are adding it to the raw scores before the softmax, this is
861
+ # effectively the same as removing these entirely.
862
+
863
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
864
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
865
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
866
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
867
+
868
+ if visual_attention_mask is not None:
869
+ extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)
871
+
872
+ extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)
873
+ extended_visual_attention_mask = tf.multiply(
874
+ tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst
875
+ )
876
+ else:
877
+ extended_visual_attention_mask = None
878
+
879
+ # Run Lxmert encoder
880
+ encoder_outputs = self.encoder(
881
+ embedding_output,
882
+ extended_attention_mask,
883
+ visual_feats,
884
+ visual_pos,
885
+ extended_visual_attention_mask,
886
+ output_attentions,
887
+ training,
888
+ )
889
+ visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]
890
+ vision_hidden_states = visual_encoder_outputs[0]
891
+ language_hidden_states = lang_encoder_outputs[0]
892
+
893
+ all_attentions = ()
894
+ if output_attentions:
895
+ language_attentions = lang_encoder_outputs[1]
896
+ vision_attentions = visual_encoder_outputs[1]
897
+ cross_encoder_attentions = encoder_outputs[2]
898
+ all_attentions = (
899
+ language_attentions,
900
+ vision_attentions,
901
+ cross_encoder_attentions,
902
+ )
903
+
904
+ hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()
905
+
906
+ visual_output = vision_hidden_states[-1]
907
+ lang_output = language_hidden_states[-1]
908
+ pooled_output = self.pooler(lang_output)
909
+
910
+ if not return_dict:
911
+ return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions
912
+
913
+ return TFLxmertModelOutput(
914
+ pooled_output=pooled_output,
915
+ language_output=lang_output,
916
+ vision_output=visual_output,
917
+ language_hidden_states=language_hidden_states if output_hidden_states else None,
918
+ vision_hidden_states=vision_hidden_states if output_hidden_states else None,
919
+ language_attentions=language_attentions if output_attentions else None,
920
+ vision_attentions=vision_attentions if output_attentions else None,
921
+ cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,
922
+ )
923
+
924
+ def build(self, input_shape=None):
925
+ if self.built:
926
+ return
927
+ self.built = True
928
+ if getattr(self, "embeddings", None) is not None:
929
+ with tf.name_scope(self.embeddings.name):
930
+ self.embeddings.build(None)
931
+ if getattr(self, "encoder", None) is not None:
932
+ with tf.name_scope(self.encoder.name):
933
+ self.encoder.build(None)
934
+ if getattr(self, "pooler", None) is not None:
935
+ with tf.name_scope(self.pooler.name):
936
+ self.pooler.build(None)
937
+
938
+
939
+ class TFLxmertPreTrainedModel(TFPreTrainedModel):
940
+ """
941
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
942
+ models.
943
+ """
944
+
945
+ config_class = LxmertConfig
946
+ base_model_prefix = "lxmert"
947
+
948
+ @property
949
+ def dummy_inputs(self):
950
+ """
951
+ Dummy inputs to build the network.
952
+
953
+ Returns:
954
+ Dict of `tf.Tensor` with dummy inputs
955
+ """
956
+ batch_size = 2
957
+ num_visual_features = 10
958
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
959
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
960
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
961
+
962
+ return {
963
+ "input_ids": input_ids,
964
+ "visual_feats": visual_feats,
965
+ "visual_pos": visual_pos,
966
+ }
967
+
968
+ @property
969
+ def input_signature(self):
970
+ return {
971
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
972
+ "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
973
+ "visual_feats": tf.TensorSpec((None, None, self.config.visual_feat_dim), tf.float32, name="visual_feats"),
974
+ "visual_pos": tf.TensorSpec((None, None, 4), tf.float32, name="visual_pos"),
975
+ "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"),
976
+ "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
977
+ }
978
+
979
+
980
+ LXMERT_START_DOCSTRING = r"""
981
+
982
+ The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from
983
+ Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer
984
+ model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MSCOCO captions, and Visual
985
+ Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss
986
+ for question answering attribute prediction, and object tag prediction.
987
+
988
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
989
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
990
+ behavior.
991
+
992
+ <Tip>
993
+
994
+ TensorFlow models and layers in `transformers` accept two formats as input:
995
+
996
+ - having all inputs as keyword arguments (like PyTorch models), or
997
+ - having all inputs as a list, tuple or dict in the first positional argument.
998
+
999
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1000
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1001
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1002
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1003
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1004
+ positional argument:
1005
+
1006
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1007
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1008
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1009
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1010
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1011
+
1012
+ Note that when creating models and layers with
1013
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1014
+ about any of this, as you can just pass inputs like you would to any other Python function!
1015
+
1016
+ </Tip>
1017
+
1018
+ Parameters:
1019
+ config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.
1020
+ Initializing with a config file does not load the weights associated with the model, only the
1021
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
1022
+ """
1023
+
1024
+ LXMERT_INPUTS_DOCSTRING = r"""
1025
+ Args:
1026
+ input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
1027
+ Indices of input sequence tokens in the vocabulary.
1028
+
1029
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1030
+ [`PreTrainedTokenizer.encode`] for details.
1031
+
1032
+ [What are input IDs?](../glossary#input-ids)
1033
+ visual_feats (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
1034
+ This input represents visual features: ROI-pooled object features extracted from bounding boxes using a
1035
+ faster-RCNN model.
1036
+
1037
+ These are currently not provided by the transformers library.
1038
+ visual_pos (`tf.Tensor` of shape `(batch_size, num_visual_features, 4)`):
1039
+ This input represents spatial features corresponding (by index) to the visual features. The
1040
+ pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1041
+ 1.
1042
+
1043
+ These are currently not provided by the transformers library.
1044
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1045
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1046
+
1047
+ - 1 for tokens that are **not masked**,
1048
+ - 0 for tokens that are **masked**.
1049
+
1050
+ [What are attention masks?](../glossary#attention-mask)
1051
+ visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1052
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1053
+
1054
+ - 1 for tokens that are **not masked**,
1055
+ - 0 for tokens that are **masked**.
1056
+
1057
+ [What are attention masks?](../glossary#attention-mask)
1058
+ token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1059
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1060
+ 1]`:
1061
+
1062
+ - 0 corresponds to a *sentence A* token,
1063
+ - 1 corresponds to a *sentence B* token.
1064
+
1065
+ [What are token type IDs?](../glossary#token-type-ids)
1066
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1067
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1068
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1069
+ model's internal embedding lookup matrix.
1070
+ output_attentions (`bool`, *optional*):
1071
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1072
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1073
+ config will be used instead.
1074
+ output_hidden_states (`bool`, *optional*):
1075
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1076
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1077
+ used instead.
1078
+ return_dict (`bool`, *optional*):
1079
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1080
+ eager mode, in graph mode the value will always be set to True.
1081
+ training (`bool`, *optional*, defaults to `False`):
1082
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1083
+ behaviors between training and evaluation).
1084
+ """
1085
+
1086
+
1087
+ @add_start_docstrings(
1088
+ "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.",
1089
+ LXMERT_START_DOCSTRING,
1090
+ )
1091
+ class TFLxmertModel(TFLxmertPreTrainedModel):
1092
+ def __init__(self, config, *inputs, **kwargs):
1093
+ super().__init__(config, *inputs, **kwargs)
1094
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1095
+
1096
+ @unpack_inputs
1097
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1098
+ @add_code_sample_docstrings(
1099
+ checkpoint=_CHECKPOINT_FOR_DOC,
1100
+ output_type=TFLxmertModelOutput,
1101
+ config_class=_CONFIG_FOR_DOC,
1102
+ )
1103
+ def call(
1104
+ self,
1105
+ input_ids: TFModelInputType | None = None,
1106
+ visual_feats: tf.Tensor | None = None,
1107
+ visual_pos: tf.Tensor | None = None,
1108
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1109
+ visual_attention_mask: np.ndarray | tf.Tensor | None = None,
1110
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
1111
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
1112
+ output_attentions: Optional[bool] = None,
1113
+ output_hidden_states: Optional[bool] = None,
1114
+ return_dict: Optional[bool] = None,
1115
+ training: bool = False,
1116
+ ) -> Union[Tuple, TFLxmertModelOutput]:
1117
+ outputs = self.lxmert(
1118
+ input_ids,
1119
+ visual_feats,
1120
+ visual_pos,
1121
+ attention_mask,
1122
+ visual_attention_mask,
1123
+ token_type_ids,
1124
+ inputs_embeds,
1125
+ output_attentions,
1126
+ output_hidden_states,
1127
+ return_dict,
1128
+ training,
1129
+ )
1130
+
1131
+ return outputs
1132
+
1133
+ def build(self, input_shape=None):
1134
+ if self.built:
1135
+ return
1136
+ self.built = True
1137
+ if getattr(self, "lxmert", None) is not None:
1138
+ with tf.name_scope(self.lxmert.name):
1139
+ self.lxmert.build(None)
1140
+
1141
+
1142
+ class TFLxmertPooler(keras.layers.Layer):
1143
+ def __init__(self, config, **kwargs):
1144
+ super().__init__(**kwargs)
1145
+ self.dense = keras.layers.Dense(
1146
+ config.hidden_size,
1147
+ kernel_initializer=get_initializer(config.initializer_range),
1148
+ activation="tanh",
1149
+ name="dense",
1150
+ )
1151
+ self.config = config
1152
+
1153
+ def call(self, hidden_states):
1154
+ # We "pool" the model by simply taking the hidden state corresponding
1155
+ # to the first token.
1156
+ first_token_tensor = hidden_states[:, 0]
1157
+ pooled_output = self.dense(first_token_tensor)
1158
+ return pooled_output
1159
+
1160
+ def build(self, input_shape=None):
1161
+ if self.built:
1162
+ return
1163
+ self.built = True
1164
+ if getattr(self, "dense", None) is not None:
1165
+ with tf.name_scope(self.dense.name):
1166
+ self.dense.build([None, None, self.config.hidden_size])
1167
+
1168
+
1169
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert
1170
+ class TFLxmertPredictionHeadTransform(keras.layers.Layer):
1171
+ def __init__(self, config: LxmertConfig, **kwargs):
1172
+ super().__init__(**kwargs)
1173
+
1174
+ self.dense = keras.layers.Dense(
1175
+ units=config.hidden_size,
1176
+ kernel_initializer=get_initializer(config.initializer_range),
1177
+ name="dense",
1178
+ )
1179
+
1180
+ if isinstance(config.hidden_act, str):
1181
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
1182
+ else:
1183
+ self.transform_act_fn = config.hidden_act
1184
+
1185
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
1186
+ self.config = config
1187
+
1188
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1189
+ hidden_states = self.dense(inputs=hidden_states)
1190
+ hidden_states = self.transform_act_fn(hidden_states)
1191
+ hidden_states = self.LayerNorm(inputs=hidden_states)
1192
+
1193
+ return hidden_states
1194
+
1195
+ def build(self, input_shape=None):
1196
+ if self.built:
1197
+ return
1198
+ self.built = True
1199
+ if getattr(self, "dense", None) is not None:
1200
+ with tf.name_scope(self.dense.name):
1201
+ self.dense.build([None, None, self.config.hidden_size])
1202
+ if getattr(self, "LayerNorm", None) is not None:
1203
+ with tf.name_scope(self.LayerNorm.name):
1204
+ self.LayerNorm.build([None, None, self.config.hidden_size])
1205
+
1206
+
1207
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert
1208
+ class TFLxmertLMPredictionHead(keras.layers.Layer):
1209
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1210
+ super().__init__(**kwargs)
1211
+
1212
+ self.config = config
1213
+ self.hidden_size = config.hidden_size
1214
+
1215
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1216
+
1217
+ # The output weights are the same as the input embeddings, but there is
1218
+ # an output-only bias for each token.
1219
+ self.input_embeddings = input_embeddings
1220
+
1221
+ def build(self, input_shape=None):
1222
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
1223
+
1224
+ if self.built:
1225
+ return
1226
+ self.built = True
1227
+ if getattr(self, "transform", None) is not None:
1228
+ with tf.name_scope(self.transform.name):
1229
+ self.transform.build(None)
1230
+
1231
+ def get_output_embeddings(self) -> keras.layers.Layer:
1232
+ return self.input_embeddings
1233
+
1234
+ def set_output_embeddings(self, value: tf.Variable):
1235
+ self.input_embeddings.weight = value
1236
+ self.input_embeddings.vocab_size = shape_list(value)[0]
1237
+
1238
+ def get_bias(self) -> Dict[str, tf.Variable]:
1239
+ return {"bias": self.bias}
1240
+
1241
+ def set_bias(self, value: tf.Variable):
1242
+ self.bias = value["bias"]
1243
+ self.config.vocab_size = shape_list(value["bias"])[0]
1244
+
1245
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
1246
+ hidden_states = self.transform(hidden_states=hidden_states)
1247
+ seq_length = shape_list(hidden_states)[1]
1248
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
1249
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
1250
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
1251
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
1252
+
1253
+ return hidden_states
1254
+
1255
+
1256
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert
1257
+ class TFLxmertMLMHead(keras.layers.Layer):
1258
+ def __init__(self, config: LxmertConfig, input_embeddings: keras.layers.Layer, **kwargs):
1259
+ super().__init__(**kwargs)
1260
+
1261
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1262
+
1263
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
1264
+ prediction_scores = self.predictions(hidden_states=sequence_output)
1265
+
1266
+ return prediction_scores
1267
+
1268
+ def build(self, input_shape=None):
1269
+ if self.built:
1270
+ return
1271
+ self.built = True
1272
+ if getattr(self, "predictions", None) is not None:
1273
+ with tf.name_scope(self.predictions.name):
1274
+ self.predictions.build(None)
1275
+
1276
+
1277
+ class TFLxmertPreTrainingHeads(keras.layers.Layer):
1278
+ def __init__(self, config, input_embeddings, **kwargs):
1279
+ super().__init__(**kwargs)
1280
+ self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions")
1281
+
1282
+ self.seq_relationship = keras.layers.Dense(
1283
+ 2,
1284
+ kernel_initializer=get_initializer(config.initializer_range),
1285
+ name="seq_relationship",
1286
+ )
1287
+ self.config = config
1288
+
1289
+ def call(self, sequence_output, pooled_output):
1290
+ prediction_scores = self.predictions(sequence_output)
1291
+ seq_relationship_score = self.seq_relationship(pooled_output)
1292
+ return prediction_scores, seq_relationship_score
1293
+
1294
+ def build(self, input_shape=None):
1295
+ if self.built:
1296
+ return
1297
+ self.built = True
1298
+ if getattr(self, "predictions", None) is not None:
1299
+ with tf.name_scope(self.predictions.name):
1300
+ self.predictions.build(None)
1301
+ if getattr(self, "seq_relationship", None) is not None:
1302
+ with tf.name_scope(self.seq_relationship.name):
1303
+ self.seq_relationship.build([None, None, self.config.hidden_size])
1304
+
1305
+
1306
+ class TFLxmertVisualAnswerHead(keras.layers.Layer):
1307
+ def __init__(self, config, num_labels, **kwargs):
1308
+ super().__init__(**kwargs)
1309
+ hid_dim = config.hidden_size
1310
+ self.dense = keras.layers.Dense(
1311
+ hid_dim * 2,
1312
+ kernel_initializer=get_initializer(config.initializer_range),
1313
+ name="logit_fc_._0",
1314
+ )
1315
+ self.activation = get_tf_activation("gelu")
1316
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2")
1317
+ self.dense_1 = keras.layers.Dense(
1318
+ num_labels,
1319
+ kernel_initializer=get_initializer(config.initializer_range),
1320
+ name="logit_fc_._3",
1321
+ )
1322
+ self.hid_dim = hid_dim
1323
+
1324
+ def call(self, hidden_states):
1325
+ hidden_states = self.dense(hidden_states)
1326
+ hidden_states = self.activation(hidden_states)
1327
+ hidden_states = self.layer_norm(hidden_states)
1328
+ hidden_states = self.dense_1(hidden_states)
1329
+
1330
+ return hidden_states
1331
+
1332
+ def build(self, input_shape=None):
1333
+ if self.built:
1334
+ return
1335
+ self.built = True
1336
+ if getattr(self, "dense", None) is not None:
1337
+ with tf.name_scope(self.dense.name):
1338
+ self.dense.build([None, None, self.hid_dim])
1339
+ if getattr(self, "layer_norm", None) is not None:
1340
+ with tf.name_scope(self.layer_norm.name):
1341
+ self.layer_norm.build([None, self.hid_dim * 2])
1342
+ if getattr(self, "dense_1", None) is not None:
1343
+ with tf.name_scope(self.dense_1.name):
1344
+ self.dense_1.build([None, None, self.hid_dim * 2])
1345
+
1346
+
1347
+ class TFLxmertVisualObjHead(keras.layers.Layer):
1348
+ def __init__(self, config, **kwargs):
1349
+ super().__init__(**kwargs)
1350
+ self.transform = TFLxmertPredictionHeadTransform(config, name="transform")
1351
+
1352
+ # Decide the use of visual losses
1353
+ visual_losses = {}
1354
+ if config.visual_obj_loss:
1355
+ visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels}
1356
+ if config.visual_attr_loss:
1357
+ visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels}
1358
+ if config.visual_feat_loss:
1359
+ visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim}
1360
+ self.visual_losses = visual_losses
1361
+
1362
+ # The output weights are the same as the input embeddings, but there is
1363
+ # an output-only bias for each token.
1364
+ self.decoder_dict = {
1365
+ key: keras.layers.Dense(
1366
+ self.visual_losses[key]["num"],
1367
+ kernel_initializer=get_initializer(config.initializer_range),
1368
+ name=f"decoder_dict.{key}",
1369
+ )
1370
+ for key in self.visual_losses
1371
+ }
1372
+ self.config = config
1373
+
1374
+ def call(self, hidden_states):
1375
+ hidden_states = self.transform(hidden_states)
1376
+ output = {}
1377
+ for key in self.visual_losses:
1378
+ output[key] = self.decoder_dict[key](hidden_states)
1379
+ return output
1380
+
1381
+ def build(self, input_shape=None):
1382
+ if self.built:
1383
+ return
1384
+ self.built = True
1385
+ if getattr(self, "transform", None) is not None:
1386
+ with tf.name_scope(self.transform.name):
1387
+ self.transform.build(None)
1388
+ if getattr(self, "decoder_dict", None) is not None:
1389
+ for layer in self.decoder_dict.values():
1390
+ with tf.name_scope(layer.name):
1391
+ layer.build([None, None, self.config.hidden_size])
1392
+
1393
+
1394
+ @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING)
1395
+ class TFLxmertForPreTraining(TFLxmertPreTrainedModel):
1396
+ def __init__(self, config, *inputs, **kwargs):
1397
+ super().__init__(config, *inputs, **kwargs)
1398
+
1399
+ self.config = config
1400
+ self.num_qa_labels = config.num_qa_labels
1401
+ self.visual_loss_normalizer = config.visual_loss_normalizer
1402
+
1403
+ # Use of pretraining tasks
1404
+ self.task_mask_lm = config.task_mask_lm
1405
+ self.task_obj_predict = config.task_obj_predict
1406
+ self.task_matched = config.task_matched
1407
+ self.task_qa = config.task_qa
1408
+
1409
+ # Lxmert backbone
1410
+ self.lxmert = TFLxmertMainLayer(config, name="lxmert")
1411
+
1412
+ # Pre-training heads
1413
+ self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls")
1414
+ if self.task_obj_predict:
1415
+ self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head")
1416
+ if self.task_qa:
1417
+ self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head")
1418
+
1419
+ # Loss functions
1420
+ self.loss_fcts = {
1421
+ "l2": keras.losses.Huber(delta=1.0, name="huber_loss"),
1422
+ "visn_ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1423
+ "ce": keras.losses.SparseCategoricalCrossentropy(from_logits=True),
1424
+ }
1425
+
1426
+ visual_losses = {}
1427
+ if config.visual_obj_loss:
1428
+ visual_losses["obj"] = {
1429
+ "shape": (-1,),
1430
+ "num": config.num_object_labels,
1431
+ "loss": "visn_ce",
1432
+ }
1433
+ if config.visual_attr_loss:
1434
+ visual_losses["attr"] = {
1435
+ "shape": (-1,),
1436
+ "num": config.num_attr_labels,
1437
+ "loss": "visn_ce",
1438
+ }
1439
+ if config.visual_feat_loss:
1440
+ visual_losses["feat"] = {
1441
+ "shape": (-1, config.visual_feat_dim),
1442
+ "num": config.visual_feat_dim,
1443
+ "loss": "l2",
1444
+ }
1445
+ self.visual_losses = visual_losses
1446
+
1447
+ @property
1448
+ def dummy_inputs(self):
1449
+ """
1450
+ Dummy inputs to build the network.
1451
+
1452
+ Returns:
1453
+ Dict of `tf.Tensor` with dummy inputs
1454
+ """
1455
+ batch_size = 2
1456
+ num_visual_features = 10
1457
+ input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32)
1458
+ visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))
1459
+ visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))
1460
+
1461
+ if self.config.task_obj_predict:
1462
+ obj_labels = {}
1463
+ if self.config.visual_attr_loss and self.config.task_obj_predict:
1464
+ obj_labels["attr"] = (
1465
+ tf.ones([batch_size, num_visual_features]),
1466
+ tf.ones([batch_size, num_visual_features]),
1467
+ )
1468
+ if self.config.visual_feat_loss and self.config.task_obj_predict:
1469
+ obj_labels["feat"] = (
1470
+ tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),
1471
+ tf.ones([batch_size, num_visual_features]),
1472
+ )
1473
+ if self.config.visual_obj_loss and self.config.task_obj_predict:
1474
+ obj_labels["obj"] = (
1475
+ tf.ones([batch_size, num_visual_features]),
1476
+ tf.ones([batch_size, num_visual_features]),
1477
+ )
1478
+
1479
+ return {
1480
+ **{
1481
+ "input_ids": input_ids,
1482
+ "visual_feats": visual_feats,
1483
+ "visual_pos": visual_pos,
1484
+ },
1485
+ **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}),
1486
+ }
1487
+
1488
+ def get_lm_head(self):
1489
+ return self.cls.predictions
1490
+
1491
+ def get_prefix_bias_name(self):
1492
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
1493
+ return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name
1494
+
1495
+ @unpack_inputs
1496
+ @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)
1497
+ @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
1498
+ def call(
1499
+ self,
1500
+ input_ids: TFModelInputType | None = None,
1501
+ visual_feats: tf.Tensor | None = None,
1502
+ visual_pos: tf.Tensor | None = None,
1503
+ attention_mask: tf.Tensor | None = None,
1504
+ visual_attention_mask: tf.Tensor | None = None,
1505
+ token_type_ids: tf.Tensor | None = None,
1506
+ inputs_embeds: tf.Tensor | None = None,
1507
+ masked_lm_labels: tf.Tensor | None = None,
1508
+ obj_labels: Dict[str, Tuple[tf.Tensor, tf.Tensor]] | None = None,
1509
+ matched_label: tf.Tensor | None = None,
1510
+ ans: tf.Tensor | None = None,
1511
+ output_attentions: bool | None = None,
1512
+ output_hidden_states: bool | None = None,
1513
+ return_dict: bool | None = None,
1514
+ training: bool = False,
1515
+ ) -> Tuple[tf.Tensor] | TFLxmertForPreTrainingOutput:
1516
+ r"""
1517
+ masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1518
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
1519
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
1520
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
1521
+ obj_labels (`Dict[str, Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):
1522
+ each key is named after one of the visual losses, and the two elements of the tuple are of shape
1523
+ `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for the label ids and
1524
+ the label scores, respectively
1525
+ matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):
1526
+ Labels for computing whether or not the text input matches the image (classification) loss. Input
1527
+ should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
1528
+
1529
+ - 0 indicates that the sentence does not match the image,
1530
+ - 1 indicates that the sentence does match the image.
1531
+ ans (`tf.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`):
1532
+ a one-hot representation of the correct answer *optional*
1533
+
1534
+ Returns:
1535
+ """
1536
+
1537
+ lxmert_output = self.lxmert(
1538
+ input_ids,
1539
+ visual_feats,
1540
+ visual_pos,
1541
+ attention_mask,
1542
+ visual_attention_mask,
1543
+ token_type_ids,
1544
+ inputs_embeds,
1545
+ output_attentions,
1546
+ output_hidden_states,
1547
+ return_dict,
1548
+ training,
1549
+ )
1550
+
1551
+ lang_output, visual_output, pooled_output = (
1552
+ lxmert_output[0],
1553
+ lxmert_output[1],
1554
+ lxmert_output[2],
1555
+ )
1556
+ lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
1557
+ if self.task_qa:
1558
+ answer_score = self.answer_head(pooled_output)
1559
+ else:
1560
+ answer_score = pooled_output[0][0]
1561
+
1562
+ total_loss = (
1563
+ None
1564
+ if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)
1565
+ else tf.constant(0.0)
1566
+ )
1567
+ losses = ()
1568
+ if masked_lm_labels is not None and self.task_mask_lm:
1569
+ masked_lm_loss = self.loss_fcts["ce"](
1570
+ tf.reshape(masked_lm_labels, [-1]),
1571
+ tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),
1572
+ )
1573
+ total_loss += masked_lm_loss
1574
+ losses += (masked_lm_loss,)
1575
+ if matched_label is not None and self.task_matched:
1576
+ matched_loss = self.loss_fcts["ce"](
1577
+ tf.reshape(matched_label, [-1]),
1578
+ tf.reshape(cross_relationship_score, [-1, 2]),
1579
+ )
1580
+ total_loss += matched_loss
1581
+ losses += (matched_loss,)
1582
+ if obj_labels is not None and self.task_obj_predict:
1583
+ total_visn_loss = 0.0
1584
+ visn_prediction_scores_dict = self.obj_predict_head(visual_output)
1585
+ for key, key_info in self.visual_losses.items():
1586
+ label, mask_conf = obj_labels[key]
1587
+ output_dim = key_info["num"]
1588
+ loss_fct_name = key_info["loss"]
1589
+ label_shape = key_info["shape"]
1590
+ weight = self.visual_loss_normalizer
1591
+ visn_loss_fct = self.loss_fcts[loss_fct_name]
1592
+ visn_prediction_scores = visn_prediction_scores_dict[key]
1593
+ visn_loss = visn_loss_fct(
1594
+ tf.reshape(label, label_shape),
1595
+ tf.reshape(visn_prediction_scores, [-1, output_dim]),
1596
+ )
1597
+
1598
+ if visn_loss.ndim > 1: # Regression Losses
1599
+ visn_loss = tf.reduce_mean(visn_loss)
1600
+ visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight
1601
+ total_visn_loss += visn_loss
1602
+ losses += (visn_loss,)
1603
+ total_loss += total_visn_loss
1604
+ if ans is not None and self.task_qa:
1605
+ answer_loss = self.loss_fcts["ce"](
1606
+ tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])
1607
+ )
1608
+ # exclude "*2" here to match the effect of QA losses.
1609
+ # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. (Used 10 instead of 6 in EMNLP paper)
1610
+ # Now : (loss *1) for 12 epochs
1611
+ #
1612
+ # * 2 # Multiply by 2 because > half of the data will not have label
1613
+ total_loss += answer_loss
1614
+ losses += (answer_loss,)
1615
+ # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()
1616
+
1617
+ if not return_dict:
1618
+ output = (
1619
+ lang_prediction_scores,
1620
+ cross_relationship_score,
1621
+ answer_score,
1622
+ ) + lxmert_output[3:]
1623
+ return ((total_loss,) + output) if total_loss is not None else output
1624
+
1625
+ return TFLxmertForPreTrainingOutput(
1626
+ loss=total_loss,
1627
+ prediction_logits=lang_prediction_scores,
1628
+ cross_relationship_score=cross_relationship_score,
1629
+ question_answering_score=answer_score,
1630
+ language_hidden_states=lxmert_output.language_hidden_states,
1631
+ vision_hidden_states=lxmert_output.vision_hidden_states,
1632
+ language_attentions=lxmert_output.language_attentions,
1633
+ vision_attentions=lxmert_output.vision_attentions,
1634
+ cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
1635
+ )
1636
+
1637
+ def build(self, input_shape=None):
1638
+ if self.built:
1639
+ return
1640
+ self.built = True
1641
+ if getattr(self, "lxmert", None) is not None:
1642
+ with tf.name_scope(self.lxmert.name):
1643
+ self.lxmert.build(None)
1644
+ if getattr(self, "cls", None) is not None:
1645
+ with tf.name_scope(self.cls.name):
1646
+ self.cls.build(None)
1647
+ if getattr(self, "obj_predict_head", None) is not None:
1648
+ with tf.name_scope(self.obj_predict_head.name):
1649
+ self.obj_predict_head.build(None)
1650
+ if getattr(self, "answer_head", None) is not None:
1651
+ with tf.name_scope(self.answer_head.name):
1652
+ self.answer_head.build(None)
1653
+
1654
+
1655
+ __all__ = [
1656
+ "TFLxmertForPreTraining",
1657
+ "TFLxmertMainLayer",
1658
+ "TFLxmertModel",
1659
+ "TFLxmertPreTrainedModel",
1660
+ "TFLxmertVisualFeatureEncoder",
1661
+ ]
janus/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert.py ADDED
@@ -0,0 +1,511 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import collections
17
+ import os
18
+ import unicodedata
19
+ from typing import List, Optional, Tuple
20
+
21
+ from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
22
+ from ...utils import logging
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
28
+
29
+
30
+ # Copied from transformers.models.bert.tokenization_bert.load_vocab
31
+ def load_vocab(vocab_file):
32
+ """Loads a vocabulary file into a dictionary."""
33
+ vocab = collections.OrderedDict()
34
+ with open(vocab_file, "r", encoding="utf-8") as reader:
35
+ tokens = reader.readlines()
36
+ for index, token in enumerate(tokens):
37
+ token = token.rstrip("\n")
38
+ vocab[token] = index
39
+ return vocab
40
+
41
+
42
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
43
+ def whitespace_tokenize(text):
44
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
45
+ text = text.strip()
46
+ if not text:
47
+ return []
48
+ tokens = text.split()
49
+ return tokens
50
+
51
+
52
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, BertTokenizer->LxmertTokenizer
53
+ class LxmertTokenizer(PreTrainedTokenizer):
54
+ r"""
55
+ Construct a Lxmert tokenizer. Based on WordPiece.
56
+
57
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
58
+ this superclass for more information regarding those methods.
59
+
60
+ Args:
61
+ vocab_file (`str`):
62
+ File containing the vocabulary.
63
+ do_lower_case (`bool`, *optional*, defaults to `True`):
64
+ Whether or not to lowercase the input when tokenizing.
65
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
66
+ Whether or not to do basic tokenization before WordPiece.
67
+ never_split (`Iterable`, *optional*):
68
+ Collection of tokens which will never be split during tokenization. Only has an effect when
69
+ `do_basic_tokenize=True`
70
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
71
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
72
+ token instead.
73
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
74
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
75
+ sequence classification or for a text and a question for question answering. It is also used as the last
76
+ token of a sequence built with special tokens.
77
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
78
+ The token used for padding, for example when batching sequences of different lengths.
79
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
80
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
81
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
82
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
83
+ The token used for masking values. This is the token used when training this model with masked language
84
+ modeling. This is the token which the model will try to predict.
85
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
86
+ Whether or not to tokenize Chinese characters.
87
+
88
+ This should likely be deactivated for Japanese (see this
89
+ [issue](https://github.com/huggingface/transformers/issues/328)).
90
+ strip_accents (`bool`, *optional*):
91
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
92
+ value for `lowercase` (as in the original Lxmert).
93
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
94
+ Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
95
+ extra spaces.
96
+ """
97
+
98
+ vocab_files_names = VOCAB_FILES_NAMES
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_file,
103
+ do_lower_case=True,
104
+ do_basic_tokenize=True,
105
+ never_split=None,
106
+ unk_token="[UNK]",
107
+ sep_token="[SEP]",
108
+ pad_token="[PAD]",
109
+ cls_token="[CLS]",
110
+ mask_token="[MASK]",
111
+ tokenize_chinese_chars=True,
112
+ strip_accents=None,
113
+ clean_up_tokenization_spaces=True,
114
+ **kwargs,
115
+ ):
116
+ if not os.path.isfile(vocab_file):
117
+ raise ValueError(
118
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
119
+ " model use `tokenizer = LxmertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
120
+ )
121
+ self.vocab = load_vocab(vocab_file)
122
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
123
+ self.do_basic_tokenize = do_basic_tokenize
124
+ if do_basic_tokenize:
125
+ self.basic_tokenizer = BasicTokenizer(
126
+ do_lower_case=do_lower_case,
127
+ never_split=never_split,
128
+ tokenize_chinese_chars=tokenize_chinese_chars,
129
+ strip_accents=strip_accents,
130
+ )
131
+
132
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
133
+
134
+ super().__init__(
135
+ do_lower_case=do_lower_case,
136
+ do_basic_tokenize=do_basic_tokenize,
137
+ never_split=never_split,
138
+ unk_token=unk_token,
139
+ sep_token=sep_token,
140
+ pad_token=pad_token,
141
+ cls_token=cls_token,
142
+ mask_token=mask_token,
143
+ tokenize_chinese_chars=tokenize_chinese_chars,
144
+ strip_accents=strip_accents,
145
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
146
+ **kwargs,
147
+ )
148
+
149
+ @property
150
+ def do_lower_case(self):
151
+ return self.basic_tokenizer.do_lower_case
152
+
153
+ @property
154
+ def vocab_size(self):
155
+ return len(self.vocab)
156
+
157
+ def get_vocab(self):
158
+ return dict(self.vocab, **self.added_tokens_encoder)
159
+
160
+ def _tokenize(self, text, split_special_tokens=False):
161
+ split_tokens = []
162
+ if self.do_basic_tokenize:
163
+ for token in self.basic_tokenizer.tokenize(
164
+ text, never_split=self.all_special_tokens if not split_special_tokens else None
165
+ ):
166
+ # If the token is part of the never_split set
167
+ if token in self.basic_tokenizer.never_split:
168
+ split_tokens.append(token)
169
+ else:
170
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
171
+ else:
172
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
173
+ return split_tokens
174
+
175
+ def _convert_token_to_id(self, token):
176
+ """Converts a token (str) in an id using the vocab."""
177
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
178
+
179
+ def _convert_id_to_token(self, index):
180
+ """Converts an index (integer) in a token (str) using the vocab."""
181
+ return self.ids_to_tokens.get(index, self.unk_token)
182
+
183
+ def convert_tokens_to_string(self, tokens):
184
+ """Converts a sequence of tokens (string) in a single string."""
185
+ out_string = " ".join(tokens).replace(" ##", "").strip()
186
+ return out_string
187
+
188
+ def build_inputs_with_special_tokens(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
190
+ ) -> List[int]:
191
+ """
192
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
193
+ adding special tokens. A Lxmert sequence has the following format:
194
+
195
+ - single sequence: `[CLS] X [SEP]`
196
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
197
+
198
+ Args:
199
+ token_ids_0 (`List[int]`):
200
+ List of IDs to which the special tokens will be added.
201
+ token_ids_1 (`List[int]`, *optional*):
202
+ Optional second list of IDs for sequence pairs.
203
+
204
+ Returns:
205
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
206
+ """
207
+ if token_ids_1 is None:
208
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
209
+ cls = [self.cls_token_id]
210
+ sep = [self.sep_token_id]
211
+ return cls + token_ids_0 + sep + token_ids_1 + sep
212
+
213
+ def get_special_tokens_mask(
214
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
215
+ ) -> List[int]:
216
+ """
217
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
218
+ special tokens using the tokenizer `prepare_for_model` method.
219
+
220
+ Args:
221
+ token_ids_0 (`List[int]`):
222
+ List of IDs.
223
+ token_ids_1 (`List[int]`, *optional*):
224
+ Optional second list of IDs for sequence pairs.
225
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
226
+ Whether or not the token list is already formatted with special tokens for the model.
227
+
228
+ Returns:
229
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
230
+ """
231
+
232
+ if already_has_special_tokens:
233
+ return super().get_special_tokens_mask(
234
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
235
+ )
236
+
237
+ if token_ids_1 is not None:
238
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
239
+ return [1] + ([0] * len(token_ids_0)) + [1]
240
+
241
+ def create_token_type_ids_from_sequences(
242
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
243
+ ) -> List[int]:
244
+ """
245
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
246
+ pair mask has the following format:
247
+
248
+ ```
249
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
250
+ | first sequence | second sequence |
251
+ ```
252
+
253
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
254
+
255
+ Args:
256
+ token_ids_0 (`List[int]`):
257
+ List of IDs.
258
+ token_ids_1 (`List[int]`, *optional*):
259
+ Optional second list of IDs for sequence pairs.
260
+
261
+ Returns:
262
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
263
+ """
264
+ sep = [self.sep_token_id]
265
+ cls = [self.cls_token_id]
266
+ if token_ids_1 is None:
267
+ return len(cls + token_ids_0 + sep) * [0]
268
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
269
+
270
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
271
+ index = 0
272
+ if os.path.isdir(save_directory):
273
+ vocab_file = os.path.join(
274
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
275
+ )
276
+ else:
277
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
278
+ with open(vocab_file, "w", encoding="utf-8") as writer:
279
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
280
+ if index != token_index:
281
+ logger.warning(
282
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
283
+ " Please check that the vocabulary is not corrupted!"
284
+ )
285
+ index = token_index
286
+ writer.write(token + "\n")
287
+ index += 1
288
+ return (vocab_file,)
289
+
290
+
291
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
292
+ class BasicTokenizer:
293
+ """
294
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
295
+
296
+ Args:
297
+ do_lower_case (`bool`, *optional*, defaults to `True`):
298
+ Whether or not to lowercase the input when tokenizing.
299
+ never_split (`Iterable`, *optional*):
300
+ Collection of tokens which will never be split during tokenization. Only has an effect when
301
+ `do_basic_tokenize=True`
302
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
303
+ Whether or not to tokenize Chinese characters.
304
+
305
+ This should likely be deactivated for Japanese (see this
306
+ [issue](https://github.com/huggingface/transformers/issues/328)).
307
+ strip_accents (`bool`, *optional*):
308
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
309
+ value for `lowercase` (as in the original BERT).
310
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
311
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
312
+ the full context of the words, such as contractions.
313
+ """
314
+
315
+ def __init__(
316
+ self,
317
+ do_lower_case=True,
318
+ never_split=None,
319
+ tokenize_chinese_chars=True,
320
+ strip_accents=None,
321
+ do_split_on_punc=True,
322
+ ):
323
+ if never_split is None:
324
+ never_split = []
325
+ self.do_lower_case = do_lower_case
326
+ self.never_split = set(never_split)
327
+ self.tokenize_chinese_chars = tokenize_chinese_chars
328
+ self.strip_accents = strip_accents
329
+ self.do_split_on_punc = do_split_on_punc
330
+
331
+ def tokenize(self, text, never_split=None):
332
+ """
333
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
334
+
335
+ Args:
336
+ never_split (`List[str]`, *optional*):
337
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
338
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
339
+ """
340
+ # union() returns a new set by concatenating the two sets.
341
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
342
+ text = self._clean_text(text)
343
+
344
+ # This was added on November 1st, 2018 for the multilingual and Chinese
345
+ # models. This is also applied to the English models now, but it doesn't
346
+ # matter since the English models were not trained on any Chinese data
347
+ # and generally don't have any Chinese data in them (there are Chinese
348
+ # characters in the vocabulary because Wikipedia does have some Chinese
349
+ # words in the English Wikipedia.).
350
+ if self.tokenize_chinese_chars:
351
+ text = self._tokenize_chinese_chars(text)
352
+ # prevents treating the same character with different unicode codepoints as different characters
353
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
354
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
355
+ split_tokens = []
356
+ for token in orig_tokens:
357
+ if token not in never_split:
358
+ if self.do_lower_case:
359
+ token = token.lower()
360
+ if self.strip_accents is not False:
361
+ token = self._run_strip_accents(token)
362
+ elif self.strip_accents:
363
+ token = self._run_strip_accents(token)
364
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
365
+
366
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
367
+ return output_tokens
368
+
369
+ def _run_strip_accents(self, text):
370
+ """Strips accents from a piece of text."""
371
+ text = unicodedata.normalize("NFD", text)
372
+ output = []
373
+ for char in text:
374
+ cat = unicodedata.category(char)
375
+ if cat == "Mn":
376
+ continue
377
+ output.append(char)
378
+ return "".join(output)
379
+
380
+ def _run_split_on_punc(self, text, never_split=None):
381
+ """Splits punctuation on a piece of text."""
382
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
383
+ return [text]
384
+ chars = list(text)
385
+ i = 0
386
+ start_new_word = True
387
+ output = []
388
+ while i < len(chars):
389
+ char = chars[i]
390
+ if _is_punctuation(char):
391
+ output.append([char])
392
+ start_new_word = True
393
+ else:
394
+ if start_new_word:
395
+ output.append([])
396
+ start_new_word = False
397
+ output[-1].append(char)
398
+ i += 1
399
+
400
+ return ["".join(x) for x in output]
401
+
402
+ def _tokenize_chinese_chars(self, text):
403
+ """Adds whitespace around any CJK character."""
404
+ output = []
405
+ for char in text:
406
+ cp = ord(char)
407
+ if self._is_chinese_char(cp):
408
+ output.append(" ")
409
+ output.append(char)
410
+ output.append(" ")
411
+ else:
412
+ output.append(char)
413
+ return "".join(output)
414
+
415
+ def _is_chinese_char(self, cp):
416
+ """Checks whether CP is the codepoint of a CJK character."""
417
+ # This defines a "chinese character" as anything in the CJK Unicode block:
418
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
419
+ #
420
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
421
+ # despite its name. The modern Korean Hangul alphabet is a different block,
422
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
423
+ # space-separated words, so they are not treated specially and handled
424
+ like all of the other languages.
425
+ if (
426
+ (cp >= 0x4E00 and cp <= 0x9FFF)
427
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
428
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
429
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
430
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
431
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
432
+ or (cp >= 0xF900 and cp <= 0xFAFF)
433
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
434
+ ): #
435
+ return True
436
+
437
+ return False
438
+
439
+ def _clean_text(self, text):
440
+ """Performs invalid character removal and whitespace cleanup on text."""
441
+ output = []
442
+ for char in text:
443
+ cp = ord(char)
444
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
445
+ continue
446
+ if _is_whitespace(char):
447
+ output.append(" ")
448
+ else:
449
+ output.append(char)
450
+ return "".join(output)
451
+
452
+
453
+ # Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
454
+ class WordpieceTokenizer:
455
+ """Runs WordPiece tokenization."""
456
+
457
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
458
+ self.vocab = vocab
459
+ self.unk_token = unk_token
460
+ self.max_input_chars_per_word = max_input_chars_per_word
461
+
462
+ def tokenize(self, text):
463
+ """
464
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
465
+ tokenization using the given vocabulary.
466
+
467
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
468
+
469
+ Args:
470
+ text: A single token or whitespace separated tokens. This should have
471
+ already been passed through *BasicTokenizer*.
472
+
473
+ Returns:
474
+ A list of wordpiece tokens.
475
+ """
476
+
477
+ output_tokens = []
478
+ for token in whitespace_tokenize(text):
479
+ chars = list(token)
480
+ if len(chars) > self.max_input_chars_per_word:
481
+ output_tokens.append(self.unk_token)
482
+ continue
483
+
484
+ is_bad = False
485
+ start = 0
486
+ sub_tokens = []
487
+ while start < len(chars):
488
+ end = len(chars)
489
+ cur_substr = None
490
+ while start < end:
491
+ substr = "".join(chars[start:end])
492
+ if start > 0:
493
+ substr = "##" + substr
494
+ if substr in self.vocab:
495
+ cur_substr = substr
496
+ break
497
+ end -= 1
498
+ if cur_substr is None:
499
+ is_bad = True
500
+ break
501
+ sub_tokens.append(cur_substr)
502
+ start = end
503
+
504
+ if is_bad:
505
+ output_tokens.append(self.unk_token)
506
+ else:
507
+ output_tokens.extend(sub_tokens)
508
+ return output_tokens
509
+
510
+
511
+ __all__ = ["LxmertTokenizer"]
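The `WordpieceTokenizer` above applies a greedy longest-match-first loop: take the longest vocabulary entry that matches at the current position, prefix continuation pieces with `##`, and fall back to the unknown token when nothing matches. A standalone sketch of that loop with a toy vocabulary (the vocabulary and the `wordpiece` helper are illustrative, not part of the uploaded file):

```python
# Toy vocabulary for illustration; a real WordPiece vocab has ~30k entries.
toy_vocab = {"un", "##aff", "##able", "[UNK]"}

def wordpiece(token, vocab, unk_token="[UNK]", max_chars=100):
    if len(token) > max_chars:
        return [unk_token]
    sub_tokens, start = [], 0
    while start < len(token):
        end, cur = len(token), None
        while start < end:  # shrink the candidate from the right until it is in the vocab
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the "##" prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:  # no piece matched: the whole token becomes [UNK]
            return [unk_token]
        sub_tokens.append(cur)
        start = end
    return sub_tokens

print(wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
```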
janus/lib/python3.10/site-packages/transformers/models/lxmert/tokenization_lxmert_fast.py ADDED
@@ -0,0 +1,172 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ from typing import List, Optional, Tuple
18
+
19
+ from tokenizers import normalizers
20
+
21
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
22
+ from .tokenization_lxmert import LxmertTokenizer
23
+
24
+
25
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
26
+
27
+
28
+ # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->unc-nlp/lxmert-base-uncased, BERT->Lxmert, Bert->Lxmert
29
+ class LxmertTokenizerFast(PreTrainedTokenizerFast):
30
+ r"""
31
+ Construct a "fast" Lxmert tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
32
+
33
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
34
+ refer to this superclass for more information regarding those methods.
35
+
36
+ Args:
37
+ vocab_file (`str`):
38
+ File containing the vocabulary.
39
+ do_lower_case (`bool`, *optional*, defaults to `True`):
40
+ Whether or not to lowercase the input when tokenizing.
41
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
42
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
43
+ token instead.
44
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
45
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
46
+ sequence classification or for a text and a question for question answering. It is also used as the last
47
+ token of a sequence built with special tokens.
48
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
49
+ The token used for padding, for example when batching sequences of different lengths.
50
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
51
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
52
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
53
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
54
+ The token used for masking values. This is the token used when training this model with masked language
55
+ modeling. This is the token which the model will try to predict.
56
+ clean_text (`bool`, *optional*, defaults to `True`):
57
+ Whether or not to clean the text before tokenization by removing any control characters and replacing all
58
+ whitespace characters with the classic one.
59
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
60
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
61
+ issue](https://github.com/huggingface/transformers/issues/328)).
62
+ strip_accents (`bool`, *optional*):
63
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
64
+ value for `lowercase` (as in the original Lxmert).
65
+ wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
66
+ The prefix for subwords.
67
+ """
68
+
69
+ vocab_files_names = VOCAB_FILES_NAMES
70
+ slow_tokenizer_class = LxmertTokenizer
71
+
72
+ def __init__(
73
+ self,
74
+ vocab_file=None,
75
+ tokenizer_file=None,
76
+ do_lower_case=True,
77
+ unk_token="[UNK]",
78
+ sep_token="[SEP]",
79
+ pad_token="[PAD]",
80
+ cls_token="[CLS]",
81
+ mask_token="[MASK]",
82
+ tokenize_chinese_chars=True,
83
+ strip_accents=None,
84
+ **kwargs,
85
+ ):
86
+ super().__init__(
87
+ vocab_file,
88
+ tokenizer_file=tokenizer_file,
89
+ do_lower_case=do_lower_case,
90
+ unk_token=unk_token,
91
+ sep_token=sep_token,
92
+ pad_token=pad_token,
93
+ cls_token=cls_token,
94
+ mask_token=mask_token,
95
+ tokenize_chinese_chars=tokenize_chinese_chars,
96
+ strip_accents=strip_accents,
97
+ **kwargs,
98
+ )
99
+
100
+ normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
101
+ if (
102
+ normalizer_state.get("lowercase", do_lower_case) != do_lower_case
103
+ or normalizer_state.get("strip_accents", strip_accents) != strip_accents
104
+ or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
105
+ ):
106
+ normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
107
+ normalizer_state["lowercase"] = do_lower_case
108
+ normalizer_state["strip_accents"] = strip_accents
109
+ normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
110
+ self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
111
+
112
+ self.do_lower_case = do_lower_case
113
+
114
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
115
+ """
116
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
117
+ adding special tokens. A Lxmert sequence has the following format:
118
+
119
+ - single sequence: `[CLS] X [SEP]`
120
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
121
+
122
+ Args:
123
+ token_ids_0 (`List[int]`):
124
+ List of IDs to which the special tokens will be added.
125
+ token_ids_1 (`List[int]`, *optional*):
126
+ Optional second list of IDs for sequence pairs.
127
+
128
+ Returns:
129
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
130
+ """
131
+ output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
132
+
133
+ if token_ids_1 is not None:
134
+ output += token_ids_1 + [self.sep_token_id]
135
+
136
+ return output
137
+
138
+ def create_token_type_ids_from_sequences(
139
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
140
+ ) -> List[int]:
141
+ """
142
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A Lxmert sequence
143
+ pair mask has the following format:
144
+
145
+ ```
146
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
147
+ | first sequence | second sequence |
148
+ ```
149
+
150
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
151
+
152
+ Args:
153
+ token_ids_0 (`List[int]`):
154
+ List of IDs.
155
+ token_ids_1 (`List[int]`, *optional*):
156
+ Optional second list of IDs for sequence pairs.
157
+
158
+ Returns:
159
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
160
+ """
161
+ sep = [self.sep_token_id]
162
+ cls = [self.cls_token_id]
163
+ if token_ids_1 is None:
164
+ return len(cls + token_ids_0 + sep) * [0]
165
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
166
+
167
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
168
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
169
+ return tuple(files)
170
+
171
+
172
+ __all__ = ["LxmertTokenizerFast"]
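Both LXMERT tokenizers assemble a pair of sequences as `[CLS] A [SEP] B [SEP]` and mark the segments with token type ids: 0 over `[CLS] A [SEP]`, 1 over `B [SEP]`. A plain-Python sketch of that layout, using placeholder ids rather than a real vocabulary file:

```python
# Placeholder special-token and sequence ids, not taken from any actual vocab.
cls_id, sep_id = 101, 102
token_ids_0 = [7592, 2088]  # hypothetical ids for sequence A
token_ids_1 = [2129, 2024]  # hypothetical ids for sequence B

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

print(input_ids)       # [CLS] A [SEP] B [SEP]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1]
```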
janus/lib/python3.10/site-packages/transformers/models/mamba2/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_mamba2 import *
22
+ from .modeling_mamba2 import *
23
+ else:
24
+ import sys
25
+
26
+ _file = globals()["__file__"]
27
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/mamba2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (537 Bytes). View file
 
janus/lib/python3.10/site-packages/transformers/models/mvp/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import _LazyModule
17
+ from ...utils.import_utils import define_import_structure
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from .configuration_mvp import *
22
+ from .modeling_mvp import *
23
+ from .tokenization_mvp import *
24
+ from .tokenization_mvp_fast import *
25
+ else:
26
+ import sys
27
+
28
+ _file = globals()["__file__"]
29
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
janus/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/configuration_mvp.cpython-310.pyc ADDED
Binary file (7.02 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mvp/__pycache__/tokenization_mvp_fast.cpython-310.pyc ADDED
Binary file (9.39 kB). View file
 
janus/lib/python3.10/site-packages/transformers/models/mvp/configuration_mvp.py ADDED
@@ -0,0 +1,183 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """MVP model configuration"""
16
+
17
+ import warnings
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class MvpConfig(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`MvpModel`]. It is used to instantiate a MVP model
29
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
30
+ defaults will yield a similar configuration to that of the MVP [RUCAIBox/mvp](https://huggingface.co/RUCAIBox/mvp)
31
+ architecture.
32
+
33
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
34
+ documentation from [`PretrainedConfig`] for more information.
35
+
36
+
37
+ Args:
38
+ vocab_size (`int`, *optional*, defaults to 50267):
39
+ Vocabulary size of the MVP model. Defines the number of different tokens that can be represented by the
40
+ `inputs_ids` passed when calling [`MvpModel`].
41
+ d_model (`int`, *optional*, defaults to 1024):
42
+ Dimensionality of the layers and the pooler layer.
43
+ encoder_layers (`int`, *optional*, defaults to 12):
44
+ Number of encoder layers.
45
+ decoder_layers (`int`, *optional*, defaults to 12):
46
+ Number of decoder layers.
47
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
48
+ Number of attention heads for each attention layer in the Transformer encoder.
49
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
50
+ Number of attention heads for each attention layer in the Transformer decoder.
51
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
52
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
53
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
54
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
55
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
56
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
57
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
58
+ dropout (`float`, *optional*, defaults to 0.1):
59
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
60
+ attention_dropout (`float`, *optional*, defaults to 0.0):
61
+ The dropout ratio for the attention probabilities.
62
+ activation_dropout (`float`, *optional*, defaults to 0.0):
63
+ The dropout ratio for activations inside the fully connected layer.
64
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
65
+ The dropout ratio for classifier.
66
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
67
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
68
+ just in case (e.g., 512 or 1024 or 2048).
69
+ init_std (`float`, *optional*, defaults to 0.02):
70
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
71
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
72
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
73
+ for more details.
74
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
75
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
76
+ for more details.
77
+ scale_embedding (`bool`, *optional*, defaults to `False`):
78
+ Scale embeddings by dividing by sqrt(d_model).
79
+ use_cache (`bool`, *optional*, defaults to `True`):
80
+ Whether or not the model should return the last key/values attentions (not used by all models).
81
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
82
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
83
+ `eos_token_id`.
84
+ use_prompt (`bool`, *optional*, defaults to `False`):
85
+ Whether or not to use a prompt.
86
+ prompt_length (`int`, *optional*, defaults to 100):
87
+ The length of the prompt.
88
+ prompt_mid_dim (`int`, *optional*, defaults to 800):
89
+ Dimensionality of the "intermediate" layer in the prompt.
90
+ Example:
91
+
92
+ ```python
93
+ >>> from transformers import MvpConfig, MvpModel
94
+
95
+ >>> # Initializing a MVP RUCAIBox/mvp style configuration
96
+ >>> configuration = MvpConfig()
97
+
98
+ >>> # Initializing a model (with random weights) from the RUCAIBox/mvp style configuration
99
+ >>> model = MvpModel(configuration)
100
+
101
+ >>> # Accessing the model configuration
102
+ >>> configuration = model.config
103
+ ```"""
104
+
105
+ model_type = "mvp"
106
+ keys_to_ignore_at_inference = ["past_key_values"]
107
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
108
+
109
+ def __init__(
110
+ self,
111
+ vocab_size=50267,
112
+ max_position_embeddings=1024,
113
+ encoder_layers=12,
114
+ encoder_ffn_dim=4096,
115
+ encoder_attention_heads=16,
116
+ decoder_layers=12,
117
+ decoder_ffn_dim=4096,
118
+ decoder_attention_heads=16,
119
+ encoder_layerdrop=0.0,
120
+ decoder_layerdrop=0.0,
121
+ activation_function="gelu",
122
+ d_model=1024,
123
+ dropout=0.1,
124
+ attention_dropout=0.0,
125
+ activation_dropout=0.0,
126
+ init_std=0.02,
127
+ classifier_dropout=0.0,
128
+ scale_embedding=False,
129
+ use_cache=True,
130
+ pad_token_id=1,
131
+ bos_token_id=0,
132
+ eos_token_id=2,
133
+ is_encoder_decoder=True,
134
+ decoder_start_token_id=2,
135
+ forced_eos_token_id=2,
136
+ use_prompt=False,
137
+ prompt_length=100,
138
+ prompt_mid_dim=800,
139
+ **kwargs,
140
+ ):
141
+ self.vocab_size = vocab_size
142
+ self.max_position_embeddings = max_position_embeddings
143
+ self.d_model = d_model
144
+ self.encoder_ffn_dim = encoder_ffn_dim
145
+ self.encoder_layers = encoder_layers
146
+ self.encoder_attention_heads = encoder_attention_heads
147
+ self.decoder_ffn_dim = decoder_ffn_dim
148
+ self.decoder_layers = decoder_layers
149
+ self.decoder_attention_heads = decoder_attention_heads
150
+ self.dropout = dropout
151
+ self.attention_dropout = attention_dropout
152
+ self.activation_dropout = activation_dropout
153
+ self.activation_function = activation_function
154
+ self.init_std = init_std
155
+ self.encoder_layerdrop = encoder_layerdrop
156
+ self.decoder_layerdrop = decoder_layerdrop
157
+ self.classifier_dropout = classifier_dropout
158
+ self.use_cache = use_cache
159
+ self.num_hidden_layers = encoder_layers
160
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
161
+ self.use_prompt = use_prompt
162
+ self.prompt_length = prompt_length
163
+ self.prompt_mid_dim = prompt_mid_dim
164
+
165
+ super().__init__(
166
+ pad_token_id=pad_token_id,
167
+ bos_token_id=bos_token_id,
168
+ eos_token_id=eos_token_id,
169
+ is_encoder_decoder=is_encoder_decoder,
170
+ decoder_start_token_id=decoder_start_token_id,
171
+ forced_eos_token_id=forced_eos_token_id,
172
+ **kwargs,
173
+ )
174
+
175
+ if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
176
+ self.forced_bos_token_id = self.bos_token_id
177
+ warnings.warn(
178
+ f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
179
+ "The config can simply be saved and uploaded again to be fixed."
180
+ )
181
+
182
+
183
+ __all__ = ["MvpConfig"]
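Beyond the docstring example, the `attribute_map` declared above lets the generic configuration names resolve to the MVP-specific ones. A small usage sketch, assuming the `transformers` package from this upload is importable and that `PretrainedConfig` applies the aliasing as declared:

```python
from transformers import MvpConfig

config = MvpConfig()
# attribute_map: hidden_size -> d_model, num_attention_heads -> encoder_attention_heads
print(config.hidden_size, config.d_model)                           # 1024 1024
print(config.num_attention_heads, config.encoder_attention_heads)   # 16 16
```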
janus/lib/python3.10/site-packages/transformers/models/mvp/tokenization_mvp.py ADDED
@@ -0,0 +1,394 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import os
18
+ from functools import lru_cache
19
+ from typing import List, Optional, Tuple
20
+
21
+ import regex as re
22
+
23
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer
24
+ from ...utils import logging
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
31
+
32
+ # See all MVP models at https://huggingface.co/models?filter=mvp
33
+
34
+
35
+ @lru_cache()
36
+ def bytes_to_unicode():
37
+ """
38
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
39
+ characters the bpe code barfs on.
40
+
41
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
42
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
43
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
44
+ tables between utf-8 bytes and unicode strings.
45
+ """
46
+ bs = (
47
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
48
+ )
49
+ cs = bs[:]
50
+ n = 0
51
+ for b in range(2**8):
52
+ if b not in bs:
53
+ bs.append(b)
54
+ cs.append(2**8 + n)
55
+ n += 1
56
+ cs = [chr(n) for n in cs]
57
+ return dict(zip(bs, cs))
58
+
59
+
60
+ def get_pairs(word):
61
+ """
62
+ Return set of symbol pairs in a word.
63
+
64
+ Word is represented as tuple of symbols (symbols being variable-length strings).
65
+ """
66
+ pairs = set()
67
+ prev_char = word[0]
68
+ for char in word[1:]:
69
+ pairs.add((prev_char, char))
70
+ prev_char = char
71
+ return pairs
72
+
73
+
74
+ class MvpTokenizer(PreTrainedTokenizer):
75
+ """
76
+ Constructs a MVP tokenizer, which is similar to the RoBERTa tokenizer, using byte-level Byte-Pair-Encoding.
77
+
78
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
79
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
80
+
81
+ ```python
82
+ >>> from transformers import MvpTokenizer
83
+
84
+ >>> tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
85
+ >>> tokenizer("Hello world")["input_ids"]
86
+ [0, 31414, 232, 2]
87
+
88
+ >>> tokenizer(" Hello world")["input_ids"]
89
+ [0, 20920, 232, 2]
90
+ ```
91
+
92
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
93
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
94
+
95
+ <Tip>
96
+
97
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
98
+
99
+ </Tip>
100
+
101
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
102
+ this superclass for more information regarding those methods.
103
+
104
+ Args:
105
+ vocab_file (`str`):
106
+ Path to the vocabulary file.
107
+ merges_file (`str`):
108
+ Path to the merges file.
109
+ errors (`str`, *optional*, defaults to `"replace"`):
110
+ Paradigm to follow when decoding bytes to UTF-8. See
111
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
112
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
113
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
114
+
115
+ <Tip>
116
+
117
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
118
+ sequence. The token used is the `cls_token`.
119
+
120
+ </Tip>
121
+
122
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
123
+ The end of sequence token.
124
+
125
+ <Tip>
126
+
127
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
128
+ The token used is the `sep_token`.
129
+
130
+ </Tip>
131
+
132
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
133
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
134
+ sequence classification or for a text and a question for question answering. It is also used as the last
135
+ token of a sequence built with special tokens.
136
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
137
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
138
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
139
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
140
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
141
+ token instead.
142
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
143
+ The token used for padding, for example when batching sequences of different lengths.
144
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
145
+ The token used for masking values. This is the token used when training this model with masked language
146
+ modeling. This is the token which the model will try to predict.
147
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
148
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
149
+ other word. (The MVP tokenizer detects the beginning of words by the preceding space.)
150
+ """
151
+
152
+ vocab_files_names = VOCAB_FILES_NAMES
153
+ model_input_names = ["input_ids", "attention_mask"]
154
+
155
+ def __init__(
156
+ self,
157
+ vocab_file,
158
+ merges_file,
159
+ errors="replace",
160
+ bos_token="<s>",
161
+ eos_token="</s>",
162
+ sep_token="</s>",
163
+ cls_token="<s>",
164
+ unk_token="<unk>",
165
+ pad_token="<pad>",
166
+ mask_token="<mask>",
167
+ add_prefix_space=False,
168
+ **kwargs,
169
+ ):
170
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
171
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
172
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
173
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
174
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
175
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
176
+
177
+ # Mask token behave like a normal word, i.e. include the space before it
178
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
179
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
180
+ self.encoder = json.load(vocab_handle)
181
+ self.decoder = {v: k for k, v in self.encoder.items()}
182
+ self.errors = errors # how to handle errors in decoding
183
+ self.byte_encoder = bytes_to_unicode()
184
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
185
+ with open(merges_file, encoding="utf-8") as merges_handle:
186
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
187
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
188
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
189
+ self.cache = {}
190
+ self.add_prefix_space = add_prefix_space
191
+
192
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
193
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
194
+
195
+ super().__init__(
196
+ errors=errors,
197
+ bos_token=bos_token,
198
+ eos_token=eos_token,
199
+ unk_token=unk_token,
200
+ sep_token=sep_token,
201
+ cls_token=cls_token,
202
+ pad_token=pad_token,
203
+ mask_token=mask_token,
204
+ add_prefix_space=add_prefix_space,
205
+ **kwargs,
206
+ )
207
+
208
+ @property
209
+ def vocab_size(self):
210
+ return len(self.encoder)
211
+
212
+ def get_vocab(self):
213
+ vocab = self.encoder.copy()
214
+ vocab.update(self.added_tokens_encoder)
215
+ return vocab
216
+
217
+ def bpe(self, token):
218
+ if token in self.cache:
219
+ return self.cache[token]
220
+ word = tuple(token)
221
+ pairs = get_pairs(word)
222
+
223
+ if not pairs:
224
+ return token
225
+
226
+ while True:
227
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
228
+ if bigram not in self.bpe_ranks:
229
+ break
230
+ first, second = bigram
231
+ new_word = []
232
+ i = 0
233
+ while i < len(word):
234
+ try:
235
+ j = word.index(first, i)
236
+ except ValueError:
237
+ new_word.extend(word[i:])
238
+ break
239
+ else:
240
+ new_word.extend(word[i:j])
241
+ i = j
242
+
243
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
244
+ new_word.append(first + second)
245
+ i += 2
246
+ else:
247
+ new_word.append(word[i])
248
+ i += 1
249
+ new_word = tuple(new_word)
250
+ word = new_word
251
+ if len(word) == 1:
252
+ break
253
+ else:
254
+ pairs = get_pairs(word)
255
+ word = " ".join(word)
256
+ self.cache[token] = word
257
+ return word
258
+
259
+ def _tokenize(self, text):
260
+ """Tokenize a string."""
261
+ bpe_tokens = []
262
+ for token in re.findall(self.pat, text):
263
+ token = "".join(
264
+ self.byte_encoder[b] for b in token.encode("utf-8")
265
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
266
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
267
+ return bpe_tokens
268
+
269
+ def _convert_token_to_id(self, token):
270
+ """Converts a token (str) in an id using the vocab."""
271
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
272
+
273
+ def _convert_id_to_token(self, index):
274
+ """Converts an index (integer) in a token (str) using the vocab."""
275
+ return self.decoder.get(index)
276
+
277
+ def convert_tokens_to_string(self, tokens):
278
+ """Converts a sequence of tokens (string) in a single string."""
279
+ text = "".join(tokens)
280
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
281
+ return text
282
+
283
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
284
+ if not os.path.isdir(save_directory):
285
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
286
+ return
287
+ vocab_file = os.path.join(
288
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
289
+ )
290
+ merge_file = os.path.join(
291
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
292
+ )
293
+
294
+ with open(vocab_file, "w", encoding="utf-8") as f:
295
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
296
+
297
+ index = 0
298
+ with open(merge_file, "w", encoding="utf-8") as writer:
299
+ writer.write("#version: 0.2\n")
300
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
301
+ if index != token_index:
302
+ logger.warning(
303
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
304
+ " Please check that the tokenizer is not corrupted!"
305
+ )
306
+ index = token_index
307
+ writer.write(" ".join(bpe_tokens) + "\n")
308
+ index += 1
309
+
310
+ return vocab_file, merge_file
311
+
312
+ def build_inputs_with_special_tokens(
313
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
314
+ ) -> List[int]:
315
+ """
316
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
317
+ adding special tokens. A MVP sequence has the following format:
318
+
319
+ - single sequence: `<s> X </s>`
320
+ - pair of sequences: `<s> A </s></s> B </s>`
321
+
322
+ Args:
323
+ token_ids_0 (`List[int]`):
324
+ List of IDs to which the special tokens will be added.
325
+ token_ids_1 (`List[int]`, *optional*):
326
+ Optional second list of IDs for sequence pairs.
327
+
328
+ Returns:
329
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
330
+ """
331
+ if token_ids_1 is None:
332
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
333
+ cls = [self.cls_token_id]
334
+ sep = [self.sep_token_id]
335
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
336
+
337
+ def get_special_tokens_mask(
338
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
339
+ ) -> List[int]:
340
+ """
341
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
342
+ special tokens using the tokenizer `prepare_for_model` method.
343
+
344
+ Args:
345
+ token_ids_0 (`List[int]`):
346
+ List of IDs.
347
+ token_ids_1 (`List[int]`, *optional*):
348
+ Optional second list of IDs for sequence pairs.
349
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
350
+ Whether or not the token list is already formatted with special tokens for the model.
351
+
352
+ Returns:
353
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
354
+ """
355
+ if already_has_special_tokens:
356
+ return super().get_special_tokens_mask(
357
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
358
+ )
359
+
360
+ if token_ids_1 is None:
361
+ return [1] + ([0] * len(token_ids_0)) + [1]
362
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
363
+
364
+ def create_token_type_ids_from_sequences(
365
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
366
+ ) -> List[int]:
367
+ """
368
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. MVP does not
369
+ make use of token type ids, therefore a list of zeros is returned.
370
+
371
+ Args:
372
+ token_ids_0 (`List[int]`):
373
+ List of IDs.
374
+ token_ids_1 (`List[int]`, *optional*):
375
+ Optional second list of IDs for sequence pairs.
376
+
377
+ Returns:
378
+ `List[int]`: List of zeros.
379
+ """
380
+ sep = [self.sep_token_id]
381
+ cls = [self.cls_token_id]
382
+
383
+ if token_ids_1 is None:
384
+ return len(cls + token_ids_0 + sep) * [0]
385
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
386
+
387
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
388
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
389
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
390
+ text = " " + text
391
+ return (text, kwargs)
392
+
393
+
394
+ __all__ = ["MvpTokenizer"]
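The `bytes_to_unicode` helper above is what makes a leading space visible inside byte-level BPE tokens: every byte is mapped to a printable unicode character, so a space shows up as `Ġ`. A self-contained re-run of that mapping, for illustration only:

```python
def bytes_to_unicode():
    # Same construction as in tokenization_mvp.py: printable bytes map to themselves,
    # the remaining bytes are shifted into the 256+ range so every byte is printable.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

byte_encoder = bytes_to_unicode()
print("".join(byte_encoder[b] for b in " Hello".encode("utf-8")))  # ĠHello
```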