Upload model.py with huggingface_hub
model.py
ADDED
@@ -0,0 +1,143 @@
import torch
from torch import nn
from dataclasses import dataclass
from typing import Optional, Union

from transformers import WhisperConfig, LlamaConfig, WhisperModel, AutoModelForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.cache_utils import Cache

@dataclass
class ManaraConfig:
    audio_config: WhisperConfig
    text_config: LlamaConfig

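
# The projector bridges the two backbones: linear_1 maps Whisper encoder states
# (d_model) into the LLM embedding space (hidden_size), and a stride-2 Conv1d
# halves the audio sequence length before a final linear layer.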
class MultiModalProjector(nn.Module):

    def __init__(self, config: ManaraConfig):
        super().__init__()
        self.linear_1 = nn.Linear(config.audio_config.d_model,
                                  config.text_config.hidden_size, bias=False)
        self.act = nn.SiLU()
        self.pool = nn.Conv1d(
            config.text_config.hidden_size,
            config.text_config.hidden_size,
            kernel_size=2,
            stride=2,
            bias=False
        )
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size,
                                  bias=False)

    def forward(self, audio_features):
        hidden_states = self.linear_1(audio_features)
        hidden_states = self.act(hidden_states)
        # Conv1d expects (batch, channels, time), so transpose around the pooling step.
        hidden_states = self.pool(hidden_states.transpose(1, 2)).transpose(1, 2)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


class ManaraForConditionalGeneration(nn.Module):
    def __init__(self):
        super().__init__()
        audio = 'openai/whisper-large-v3'
        text = 'babs/llama-multi-lm'
        self.text_config = LlamaConfig.from_pretrained(text)
        self.audio_config = WhisperConfig.from_pretrained(audio)

        config = ManaraConfig(self.audio_config, self.text_config)
        self.vocab_size = config.text_config.vocab_size

        # Only the Whisper encoder is used; the decoder is discarded and the
        # encoder is frozen, so training updates the projector and LLM only.
        audio_tower = WhisperModel.from_pretrained(audio,
                                                   attn_implementation='flash_attention_2')
        self.audio_tower = audio_tower.encoder
        self.audio_tower.requires_grad_(False)
        del audio_tower

        self.language_model = AutoModelForCausalLM.from_pretrained(text,
                                                                   attn_implementation='flash_attention_2')
        self.multi_modal_projector = MultiModalProjector(config)
        # Placeholder token id marking audio positions in input_ids.
        self.audio_token_id = 128002

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def get_output_embeddings(self):
        return self.language_model.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.language_model.set_output_embeddings(new_embeddings)

    def set_decoder(self, decoder):
        self.language_model.set_decoder(decoder)

    def get_decoder(self):
        return self.language_model.get_decoder()

    def get_audio_embeds(self, input_features: torch.FloatTensor):
        audio_outputs = self.audio_tower(input_features)
        audio_hidden_states = audio_outputs.last_hidden_state
        # audio_hidden_states = audio_hidden_states.reshape(-1, self.config.audio_config.intermediate_size)
        audio_embeds = self.multi_modal_projector(audio_hidden_states)
        return audio_embeds

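    # For whisper-large-v3, a 30 s clip yields 1500 encoder frames; the stride-2
    # pooling in the projector reduces these to 750 audio embeddings, so each
    # clip must be represented by 750 placeholder tokens in input_ids.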
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if input_features is not None:
            audio_embeds = self.get_audio_embeds(input_features)

            # Each audio placeholder token is overwritten by one projected audio
            # frame, so the placeholder count must equal batch * pooled_length.
            audio_token_mask = (input_ids == self.audio_token_id)

            if audio_token_mask.sum() != audio_embeds.shape[0] * audio_embeds.shape[1]:
                raise ValueError(
                    f"The number of audio tokens in input_ids ({audio_token_mask.sum()}) does not match "
                    f"the number of audio features ({audio_embeds.shape[0] * audio_embeds.shape[1]}). "
                    "Check your data preparation.")

            inputs_embeds[audio_token_mask] = audio_embeds.flatten(0, 1).to(inputs_embeds.dtype)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            return_dict=True,
            **kwargs,
        )
        return outputs


if __name__ == "__main__":
    model = ManaraForConditionalGeneration()
    print(model)
    print("Audio Token ID:", model.audio_token_id)
    print("Vocab Size:", model.vocab_size)
    print("Text Config:", model.text_config)
    print("Audio Config:", model.audio_config)
    print("Trainable Parameters:", sum(p.numel() for p in model.parameters() if p.requires_grad))
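
A minimal smoke test, assuming both checkpoints are reachable and a CUDA device with flash-attention is available; the dummy shapes and the 750-placeholder count below are illustrative assumptions derived from the model code, not part of the file:

import torch

model = ManaraForConditionalGeneration().to('cuda', dtype=torch.bfloat16)  # flash-attention needs fp16/bf16

batch, n_audio_tokens = 1, 750  # 1500 Whisper encoder frames pooled by 2
# Dummy log-mel input: whisper-large-v3 expects (batch, 128 mel bins, 3000 frames) for 30 s of audio.
input_features = torch.randn(batch, 128, 3000, device='cuda', dtype=torch.bfloat16)
# A prompt made entirely of audio placeholder tokens, one per pooled frame.
input_ids = torch.full((batch, n_audio_tokens), model.audio_token_id, device='cuda')
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    out = model(input_ids=input_ids, input_features=input_features, attention_mask=attention_mask)
print(out.logits.shape)  # (1, 750, vocab_size)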