Samoed committed · Commit 2df56dc · verified · 1 Parent(s): cfeb668

Base Integration with SentenceTransformers

1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+     "word_embedding_dimension": 1536,
+     "pooling_mode_cls_token": false,
+     "pooling_mode_mean_tokens": false,
+     "pooling_mode_max_tokens": false,
+     "pooling_mode_mean_sqrt_len_tokens": false,
+     "pooling_mode_weightedmean_tokens": false,
+     "pooling_mode_lasttoken": true,
+     "include_prompt": true
+ }
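
This pooling config enables last-token pooling only, over the model's 1536-dimensional hidden states, and keeps prompt tokens in the pooled span (`include_prompt: true`). A minimal sketch of what the resulting `Pooling` module does with a right-padded batch (shapes and values are illustrative):

```python
import torch
from sentence_transformers.models import Pooling

# Mirrors 1_Pooling/config.json: last-token pooling over 1536-dim states.
pooling = Pooling(word_embedding_dimension=1536, pooling_mode="lasttoken")

token_embeddings = torch.randn(2, 5, 1536)        # [batch, seq, dim]
attention_mask = torch.tensor([[1, 1, 1, 0, 0],   # right-padded row
                               [1, 1, 1, 1, 1]])
out = pooling({"token_embeddings": token_embeddings,
               "attention_mask": attention_mask})
# Row 0 pools position 2 (its last real token), row 1 pools position 4.
print(out["sentence_embedding"].shape)            # torch.Size([2, 1536])
```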
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "prompts": {
+         "query": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
+     },
+     "default_prompt_name": null,
+     "similarity_fn_name": null
+ }
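
The `prompts` map defines one named prompt, `query`, which SentenceTransformers prepends verbatim when `encode` is called with `prompt_name="query"`; since `default_prompt_name` is null, inputs are encoded without a prefix unless a prompt is requested. A minimal sketch, with a placeholder repo path since the commit page does not name the repository:

```python
from sentence_transformers import SentenceTransformer

# Placeholder path; the custom module requires trust_remote_code=True to load.
model = SentenceTransformer("path/to/this/repo", trust_remote_code=True)

# Prepends the "query" system prompt defined above before tokenization.
q = model.encode("find the cat photo", prompt_name="query")

# No prompt_name: encoded as-is, because default_prompt_name is null.
d = model.encode("a photo of a cat")
```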
custom_st.py ADDED
@@ -0,0 +1,131 @@
+ from io import BytesIO
+ from typing import Any, Dict, List, Optional
+
+ import torch
+ from PIL import Image
+ from sentence_transformers.models import Transformer as BaseTransformer
+ from transformers import AutoModelForVision2Seq, AutoProcessor
+
+
+ class MultiModalTransformer(BaseTransformer):
+     def __init__(
+         self,
+         model_name_or_path: str,
+         cache_dir: Optional[str] = None,
+         tokenizer_args: Optional[Dict[str, Any]] = None,
+         min_image_tokens: int = 256,
+         max_image_tokens: int = 1280,
+         max_length: int = 1800,
+         **kwargs,
+     ):
+         super().__init__(model_name_or_path, **kwargs)
+         if tokenizer_args is None:
+             tokenizer_args = {}
+         tokenizer_args.pop("trust_remote_code", None)
+
+         # Initialize the processor; each image token covers a 28x28 patch,
+         # so the token budgets translate directly into pixel budgets.
+         min_pixels = min_image_tokens * 28 * 28
+         max_pixels = max_image_tokens * 28 * 28
+         self.processor = AutoProcessor.from_pretrained(
+             model_name_or_path, min_pixels=min_pixels, max_pixels=max_pixels, **kwargs
+         )
+         self.processor.tokenizer.padding_side = "right"
+         self.sep = " "
+         self.max_length = max_length
+         self.normalize = True
+
+     def _load_model(
+         self,
+         model_name_or_path: str,
+         config,
+         cache_dir: str,
+         backend: str,
+         is_peft_model: bool,
+         **model_args,
+     ) -> None:
+         model_args.pop("trust_remote_code", None)
+         self.auto_model = AutoModelForVision2Seq.from_pretrained(
+             model_name_or_path, torch_dtype=torch.float16, **model_args
+         )
+
+     def forward(
+         self, features: Dict[str, torch.Tensor], **kwargs
+     ) -> Dict[str, torch.Tensor]:
+         if features.get("inputs_embeds", None) is None:
+             features["inputs_embeds"] = self.auto_model.base_model.embed_tokens(
+                 features["input_ids"]
+             )
+         if features.get("pixel_values", None) is not None:
+             # Encode the images and splice the visual embeddings into the
+             # positions held by the image-pad tokens.
+             features["pixel_values"] = features["pixel_values"].type(
+                 self.auto_model.visual.get_dtype()
+             )
+             image_embeds = self.auto_model.visual(
+                 features["pixel_values"], grid_thw=features["image_grid_thw"]
+             )
+             image_mask = features["input_ids"] == self.auto_model.config.image_token_id
+             features["inputs_embeds"][image_mask] = image_embeds
+             features.pop("pixel_values")
+             features.pop("image_grid_thw")
+         features.pop("input_ids")
+         outputs = self.auto_model.model(
+             **features,
+             return_dict=True,
+             output_hidden_states=True,
+         )
+         # An optional "pooling_mask" overrides the attention mask for pooling.
+         pooling_mask = (
+             features["attention_mask"]
+             if features.get("pooling_mask", None) is None
+             else features["pooling_mask"]
+         )
+         # If every sequence ends in a real token, the batch is left-padded and
+         # the final position of each row is already its last token.
+         left_padding = pooling_mask[:, -1].sum() == pooling_mask.shape[0]
+         if left_padding:
+             embeddings = outputs.last_hidden_state
+         else:
+             # Right padding: gather the hidden state of each row's last real token.
+             sequence_lengths = pooling_mask.sum(dim=1) - 1
+             embeddings = outputs.last_hidden_state[
+                 torch.arange(
+                     outputs.last_hidden_state.shape[0],
+                     device=outputs.last_hidden_state.device,
+                 ),
+                 sequence_lengths,
+             ]
+         features.update({"token_embeddings": embeddings})
+         return features
+
+     def tokenize(
+         self, texts: List[List[Dict[str, Image.Image]]] | List[str]
+     ) -> Dict[str, torch.Tensor]:
+         split_token = "<|im_end|>\n"
+
+         def process_text_item(item):
+             # Plain strings are passed through unchanged.
+             if isinstance(item, str):
+                 return item, None
+
+             text, img = "", None
+             if "image" in item:
+                 text += "<|vision_start|><|image_pad|><|vision_end|>"
+                 img = item["image"]
+                 if isinstance(img, bytes):
+                     img = Image.open(BytesIO(img)).convert("RGB")
+                 elif isinstance(img, str):
+                     img = Image.open(img).convert("RGB")
+                 elif not isinstance(img, Image.Image):
+                     raise ValueError(f"Unknown image type {type(img)}")
+             if "text" in item:
+                 text += item["text"].lstrip()
+             if split_token in text:
+                 # Keep a leading instruction block intact and wrap only the
+                 # remainder in the chat template.
+                 instruction, text = text.split(split_token, 1)
+                 text = f"{instruction}{split_token}<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n<|endoftext|>"
+             else:
+                 text = f"<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n<|endoftext|>"
+             return text, img
+
+         all_texts, all_images = [], []
+         for item in texts:
+             text, image = process_text_item(item)
+             all_texts.append(text)
+             all_images.append(image)
+
+         if all_images != [None] * len(all_images):
+             inputs = self.processor(
+                 text=all_texts,
+                 images=all_images,
+                 padding="longest",
+                 truncation=True,
+                 max_length=self.max_seq_length,
+                 return_tensors="pt",
+             )
+         else:
+             inputs = self.processor(
+                 text=all_texts,
+                 padding="longest",
+                 truncation=True,
+                 max_length=self.max_seq_length,
+                 return_tensors="pt",
+             )
+         return inputs
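
`tokenize` accepts either plain strings, which pass through without chat-template wrapping, or dicts with `image` and/or `text` keys, where the image may be raw bytes, a file path, or a `PIL.Image`. A sketch of the accepted shapes, instantiating the module directly with a placeholder path and an illustrative image file:

```python
from PIL import Image
from custom_st import MultiModalTransformer

module = MultiModalTransformer("path/to/this/repo")  # placeholder path

batch = [
    {"image": "cat.jpg", "text": "a photo of a cat"},  # illustrative file path
    {"image": Image.new("RGB", (224, 224))},           # in-memory PIL image
]
features = module.tokenize(batch)
print(sorted(features.keys()))  # attention_mask, image_grid_thw, input_ids, pixel_values

text_only = module.tokenize(["a plain text query"])    # strings are not wrapped
print(sorted(text_only.keys()))                        # attention_mask, input_ids
```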
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+     {
+         "idx": 0,
+         "name": "0",
+         "path": "",
+         "type": "custom_st.MultiModalTransformer"
+     },
+     {
+         "idx": 1,
+         "name": "1",
+         "path": "1_Pooling",
+         "type": "sentence_transformers.models.Pooling"
+     },
+     {
+         "idx": 2,
+         "name": "2",
+         "path": "2_Normalize",
+         "type": "sentence_transformers.models.Normalize"
+     }
+ ]
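
`modules.json` wires the full pipeline: the custom multimodal transformer, then last-token pooling, then L2 normalization. An end-to-end sketch under the same assumptions as above (placeholder repo path, illustrative image file; cosine similarity is the default because `similarity_fn_name` is null):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("path/to/this/repo", trust_remote_code=True)

queries = model.encode(["find the cat photo"], prompt_name="query")
docs = model.encode([{"image": "cat.jpg", "text": "a photo of a cat"}])

# Embeddings are already L2-normalized by the final module.
print(model.similarity(queries, docs))
```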