import torch
import torch.nn as nn
from transformers import CLIPTextModel
from transformers.models.clip.modeling_clip import CLIPAttention
from typing import Any, Callable, Dict, Optional, Tuple, Union, List
from transformers.modeling_outputs import BaseModelOutputWithPooling
from transformers.modeling_attn_mask_utils import AttentionMaskConverter

_make_causal_mask = AttentionMaskConverter._make_causal_mask
_expand_mask = AttentionMaskConverter._expand_mask

from adaface.util import add_noise_to_tensor

class CLIPAttentionMKV(nn.Module):
    """Multi-headed attention from the 'Attention Is All You Need' paper, with the K and V
    projections widened by `multiplier`, so that each query attends over `multiplier`
    copies of the keys/values."""

    def __init__(self, config, multiplier=2):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.multiplier = multiplier

        # K and V are widened by `multiplier`; Q and the output projection keep the original width,
        # so the attended key/value sequence becomes `multiplier` times longer.
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim * self.multiplier)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim * self.multiplier)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def extend_weights(self, clip_attn_layer, layer_idx, multiplier, noise_std=0.1,
                       noise_std_is_relative=True, keep_norm=False, verbose=False):
        # Grow the K/V projections of this layer by `multiplier`, copying the weights of
        # `clip_attn_layer` and perturbing only the newly added copies with noise.
        self.multiplier *= multiplier

        # Q and the output projection are copied unchanged.
        self.q_proj.weight.data = clip_attn_layer.q_proj.weight.data.clone()
        self.q_proj.bias.data = clip_attn_layer.q_proj.bias.data.clone()
        self.out_proj.weight.data = clip_attn_layer.out_proj.weight.data.clone()
        self.out_proj.bias.data = clip_attn_layer.out_proj.bias.data.clone()

        # K/V weights and biases are tiled `multiplier` times along the output dimension.
        self.v_proj.bias.data = clip_attn_layer.v_proj.bias.data.repeat(multiplier)
        self.k_proj.bias.data = clip_attn_layer.k_proj.bias.data.repeat(multiplier)

        self.v_proj.weight.data = clip_attn_layer.v_proj.weight.data.repeat(multiplier, 1)
        self.k_proj.weight.data = clip_attn_layer.k_proj.weight.data.repeat(multiplier, 1)

        if noise_std > 0:
            ORIG_V_SHAPE = list(clip_attn_layer.v_proj.weight.shape)
            ORIG_V_SHAPE_D0 = ORIG_V_SHAPE[0]
            # Perturb only the rows beyond the original shape; the first copy keeps the original weights.
            self.v_proj.weight.data[ORIG_V_SHAPE_D0:] = \
                add_noise_to_tensor(self.v_proj.weight.data[ORIG_V_SHAPE_D0:],
                                    noise_std, noise_std_is_relative, keep_norm)
            if verbose:
                NEW_V_SHAPE = list(self.v_proj.weight.shape)
                NOISED_V_SHAPE = list(self.v_proj.weight.data[ORIG_V_SHAPE_D0:].shape)
                print(f"Layer {layer_idx}: {NOISED_V_SHAPE} in {NEW_V_SHAPE} of v_proj is added with {noise_std} noise")

            ORIG_K_SHAPE = list(clip_attn_layer.k_proj.weight.shape)
            ORIG_K_SHAPE_D0 = ORIG_K_SHAPE[0]
            self.k_proj.weight.data[ORIG_K_SHAPE_D0:] = \
                add_noise_to_tensor(self.k_proj.weight.data[ORIG_K_SHAPE_D0:],
                                    noise_std, noise_std_is_relative, keep_norm)
            if verbose:
                NEW_K_SHAPE = list(self.k_proj.weight.shape)
                NOISED_K_SHAPE = list(self.k_proj.weight.data[ORIG_K_SHAPE_D0:].shape)
                print(f"Layer {layer_idx}: {NOISED_K_SHAPE} in {NEW_K_SHAPE} of k_proj is added with {noise_std} noise")

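    # Shape walkthrough (illustrative): with embed_dim=768 and an overall multiplier of 2,
    # q_proj/out_proj stay 768x768 while k_proj/v_proj hold 1536x768 weights, i.e. two stacked
    # copies of the original 768x768 K/V weights, the second copy perturbed by extend_weights().
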
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        query_states = self.q_proj(hidden_states) * self.scale
        # K/V projections are `multiplier` times wider, so after _shape() their sequence
        # dimension is `multiplier * tgt_len`.
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        # Source length of a single K/V copy; the attention masks are sized against this length.
        src_len0 = src_len // self.multiplier
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len0):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len0)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            # Broadcast the per-position mask across the `multiplier` copies of each key position.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len0, self.multiplier) + causal_attention_mask.unsqueeze(4)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len0):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len0)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len0, self.multiplier) + attention_mask.unsqueeze(4)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # Reshape to (bsz, num_heads, tgt_len, src_len) for the caller, then view back,
            # so that attn_weights keeps its gradient through the returned tensor.
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped

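# A minimal sanity-check sketch (not part of the original module), assuming the default
# CLIPTextConfig (hidden_size=512, 8 heads); it shows how a single CLIPAttention layer is
# promoted to CLIPAttentionMKV and that the output shape is unchanged:
#
#   from transformers import CLIPTextConfig
#   cfg  = CLIPTextConfig()
#   base = CLIPAttention(cfg)
#   mkv  = CLIPAttentionMKV(cfg, multiplier=1)
#   mkv.extend_weights(base, layer_idx=0, multiplier=2, noise_std=0.1)
#   x = torch.randn(1, 77, cfg.hidden_size)
#   y, _ = mkv(x)          # y.shape == (1, 77, cfg.hidden_size)
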
class CLIPTextModelWrapper(CLIPTextModel):
    # Extends CLIPTextModel.forward() with three hooks:
    #   * return_token_embs: return the raw token embeddings without running the encoder.
    #   * input_token_embs: inject precomputed token embeddings instead of looking up input_ids.
    #   * hidden_state_layer_weights: blend the last N encoder hidden states with the given
    #     weights (normalized to sum to 1) before the final layer norm.

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        input_token_embs: Optional[torch.Tensor] = None,
        hidden_state_layer_weights: Optional[torch.Tensor] = None,
        return_token_embs: Optional[bool] = False,
    ) -> Union[Tuple, torch.Tensor, BaseModelOutputWithPooling]:

        if return_token_embs:
            return self.text_model.embeddings.token_embedding(input_ids)

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        output_attentions = output_attentions if output_attentions is not None else self.text_model.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.text_model.config.output_hidden_states
        )
        # Blending hidden states requires the encoder to return all of its hidden states.
        if hidden_state_layer_weights is not None:
            output_hidden_states = True

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.text_model.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=input_token_embs)

        # CLIP's text model uses a causal attention mask; build it here.
        causal_attention_mask = _make_causal_mask(input_shape, hidden_states.dtype, device=hidden_states.device)

        if attention_mask is not None:
            # Expand [bsz, seq_len] to [bsz, 1, tgt_seq_len, src_seq_len].
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.text_model.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if hidden_state_layer_weights is None:
            last_hidden_state = encoder_outputs[0]
        else:
            num_hidden_state_layers = len(hidden_state_layer_weights)
            # encoder_outputs[1] holds all hidden states; take the last N of them.
            last_hidden_states = encoder_outputs[1][-num_hidden_state_layers:]
            hidden_state_layer_weights = hidden_state_layer_weights.to(last_hidden_states[0].dtype)
            # Normalize the layer weights so they sum to 1 across layers.
            hidden_state_layer_weights = hidden_state_layer_weights / hidden_state_layer_weights.sum(dim=0, keepdim=True)
            # [num_layers, 1] -> [num_layers, 1, 1, 1] for broadcasting over [bsz, seq_len, dim].
            hidden_state_layer_weights = hidden_state_layer_weights.unsqueeze(1).unsqueeze(1)
            # Weighted sum of the selected layers' hidden states.
            last_hidden_state = (torch.stack(last_hidden_states, dim=0) * hidden_state_layer_weights).sum(dim=0)

        last_hidden_state = self.text_model.final_layer_norm(last_hidden_state)

        if self.text_model.eos_token_id == 2:
            # Legacy configs use eos_token_id == 2; there the EOS token has the highest id in the
            # sequence, so pool the hidden state at the argmax position of input_ids.
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # Otherwise pool at the first occurrence of the configured EOS token.
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.text_model.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def extend_clip_attention_MKV_multiplier(self, begin_layer_idx=-1, end_layer_idx=-1, multiplier=2, noise_std=0.1):
        num_extended_layers = 0

        for layer_idx, layer in enumerate(self.text_model.encoder.layers):
            if begin_layer_idx >= 0 and layer_idx < begin_layer_idx:
                continue
            if end_layer_idx >= 0 and layer_idx >= end_layer_idx:
                break

            old_attn_layer = layer.self_attn
            if not isinstance(old_attn_layer, (CLIPAttention, CLIPAttentionMKV)):
                raise TypeError(f"Unexpected self_attn type at layer {layer_idx}: {type(old_attn_layer)}")
            # Wrap a vanilla CLIPAttention layer in CLIPAttentionMKV (multiplier 1) before extending it.
            if not isinstance(old_attn_layer, CLIPAttentionMKV):
                layer.self_attn = CLIPAttentionMKV(old_attn_layer.config, 1)
            layer.self_attn.extend_weights(old_attn_layer, layer_idx, multiplier, noise_std, verbose=True)
            num_extended_layers += 1

        return num_extended_layers
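
# A minimal usage sketch, not part of the original module. It assumes the standard
# "openai/clip-vit-large-patch14" text encoder (12 layers, hidden size 768), an illustrative
# layer range, and a transformers version compatible with the mask helpers imported above;
# swap in whatever checkpoint, layer range, and noise level your pipeline actually uses.
if __name__ == "__main__":
    from transformers import CLIPTokenizer

    text_encoder = CLIPTextModelWrapper.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

    # Double the K/V copies of encoder layers 9-11, adding 10% relative noise to the new copies.
    num_extended = text_encoder.extend_clip_attention_MKV_multiplier(
        begin_layer_idx=9, end_layer_idx=12, multiplier=2, noise_std=0.1)

    prompt_ids = tokenizer(["a photo of a person"], padding="max_length", max_length=77,
                           truncation=True, return_tensors="pt").input_ids
    with torch.no_grad():
        out = text_encoder(input_ids=prompt_ids, return_dict=True)
    print(f"Extended {num_extended} layers; last_hidden_state shape: {tuple(out.last_hidden_state.shape)}")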