Fix: `resize_token_embeddings` interface for transformers v4.49.0
#97 · opened by gar231
- modeling_florence2.py +4 -4
modeling_florence2.py
CHANGED
@@ -2080,8 +2080,8 @@ class Florence2LanguageForConditionalGeneration(Florence2LanguagePreTrainedModel
     def get_decoder(self):
         return self.model.get_decoder()

-    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
-        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, **kwargs) -> nn.Embedding:
+        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, **kwargs)
         self._resize_final_logits_bias(new_embeddings.weight.shape[0])
         return new_embeddings

@@ -2589,8 +2589,8 @@ class Florence2ForConditionalGeneration(Florence2PreTrainedModel):
     def get_input_embeddings(self):
         return self.language_model.get_input_embeddings()

-    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
-        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None, **kwargs) -> nn.Embedding:
+        model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of, **kwargs)
         # update vocab size
         self.config.text_config.vocab_size = model_embeds.num_embeddings
         self.config.vocab_size = model_embeds.num_embeddings