michaeldinzinger committed
Commit 6c40ccb · 1 Parent(s): efdbaa3

Upload new script

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model_0/tokenizer.json filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,4 +1,4 @@
  .env
  poetry.lock
  **/__pycache__/
- model/
+ temp/
model_0/config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "_name_or_path": "jinaai/jina-embeddings-v3",
+   "architectures": [
+     "XLMRobertaLoRA"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "auto_map": {
+     "AutoConfig": "jinaai/xlm-roberta-flash-implementation--configuration_xlm_roberta.XLMRobertaFlashConfig",
+     "AutoModel": "jinaai/xlm-roberta-flash-implementation--modeling_lora.XLMRobertaLoRA",
+     "AutoModelForMaskedLM": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForMaskedLM",
+     "AutoModelForPreTraining": "jinaai/xlm-roberta-flash-implementation--modeling_xlm_roberta.XLMRobertaForPreTraining"
+   },
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "emb_pooler": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "load_trained_adapters": true,
+   "lora_adaptations": [
+     "retrieval.query",
+     "retrieval.passage",
+     "separation",
+     "classification",
+     "text-matching"
+   ],
+   "lora_alpha": 1,
+   "lora_dropout_p": 0.0,
+   "lora_main_params_trainable": false,
+   "lora_rank": 4,
+   "matryoshka_dimensions": [
+     32,
+     64,
+     128,
+     256,
+     512,
+     768,
+     1024
+   ],
+   "max_position_embeddings": 8194,
+   "model_type": "xlm-roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "rotary",
+   "rotary_emb_base": 20000.0,
+   "task_instructions": {
+     "classification": "",
+     "retrieval.passage": "Represent the document for retrieval: ",
+     "retrieval.query": "Represent the query for retrieving evidence documents: ",
+     "separation": "",
+     "text-matching": ""
+   },
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.3",
+   "truncate_dim": null,
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "use_flash_attn": true,
+   "use_reentrant": false,
+   "vocab_size": 250002
+ }
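
For orientation (not part of the commit): the exported config can be inspected directly with the standard library, for example to read the task prefixes and Matryoshka dimensions that jina-embeddings-v3 declares. A minimal sketch, assuming it is run from the repository root:

import json

with open("model_0/config.json") as f:
    cfg = json.load(f)

print(cfg["matryoshka_dimensions"])                  # [32, 64, 128, 256, 512, 768, 1024]
print(cfg["task_instructions"]["retrieval.query"])   # "Represent the query for retrieving evidence documents: "
print(cfg["hidden_size"], cfg["lora_rank"])          # 1024 4
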
model_0/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17ca06efd886a065d0081912b04c9e27ef5086a9dd09659cce32aa9c84587f23
+ size 1144685320
model_0/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
model_0/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
+ size 17082734
model_0/tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 8194,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "XLMRobertaTokenizer",
+   "unk_token": "<unk>"
+ }
model_1/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "Snowflake/snowflake-arctic-embed-s",
+   "architectures": [
+     "BertModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 384,
+   "initializer_range": 0.02,
+   "intermediate_size": 1536,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
model_1/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3be181b2b68172a172cffcd124532af085c5f9ab7e97d5a5bc12a9effbb02a84
+ size 133462128
model_1/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
model_1/tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "max_length": 512,
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_to_multiple_of": null,
+   "pad_token": "[PAD]",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
model_1/vocab.txt ADDED
The diff for this file is too large to render.
 
modeling_jina_v3_arctic_s.py CHANGED
@@ -1,7 +1,9 @@
+ import os
  import torch
  import torch.nn as nn
  import torch.nn.functional as F
- from transformers import BertModel, PreTrainedModel, BertConfig, PretrainedConfig, AutoModel
+ from transformers import BertModel, PreTrainedModel, BertConfig, PretrainedConfig, XLMRobertaTokenizerFast, \
+     AutoModel, PreTrainedTokenizerFast, BertTokenizer, PreTrainedTokenizer, PreTrainedTokenizerBase, AutoTokenizer, XLMRobertaTokenizer
  from typing import *


@@ -12,48 +14,12 @@ class ConcatModelConfig(PretrainedConfig):
          super().__init__(**kwargs)


- # See https://huggingface.co/Marqo/marqo-chimera-arctic-bge-m
  class ConcatModel(PreTrainedModel):
      config_class = ConcatModelConfig

-     def __init__(self, config: ConcatModelConfig):
-         super().__init__(config)
-         bert_config_1 = BertConfig(
-             vocab_size=30522,
-             hidden_size=384,
-             num_hidden_layers=12,
-             num_attention_heads=12,
-             intermediate_size=1536,
-             hidden_act="gelu",
-             hidden_dropout_prob=0.1,
-             attention_probs_dropout_prob=0.1,
-             max_position_embeddings=512,
-             type_vocab_size=2,
-             initializer_range=0.02,
-             layer_norm_eps=1e-12,
-         )
-
-         bert_config_2 = BertConfig(
-             vocab_size=30522,
-             hidden_size=384,
-             num_hidden_layers=12,
-             num_attention_heads=12,
-             intermediate_size=1536,
-             hidden_act="gelu",
-             hidden_dropout_prob=0.1,
-             attention_probs_dropout_prob=0.1,
-             max_position_embeddings=512,
-             type_vocab_size=2,
-             initializer_range=0.02,
-             layer_norm_eps=1e-12,
-         )
-
-         self.model = nn.ModuleDict(
-             {
-                 "model_0": BertModel(bert_config_1),
-                 "model_1": BertModel(bert_config_2),
-             }
-         )
+     def __init__(self, models):
+         super().__init__(ConcatModelConfig())
+         self.models = models

      def forward(
          self,
@@ -63,31 +29,129 @@ class ConcatModel(PreTrainedModel):
          **kwargs
      ) -> torch.Tensor:
          embeddings = []
-         for _, model in self.model.items():
-             model_output = model(
-                 input_ids=input_ids,
-                 attention_mask=attention_mask,
-                 token_type_ids=token_type_ids,
-             )
+         for i, model in enumerate(self.models):
+             if i == 0:
+                 model_output = model(
+                     input_ids=input_ids,
+                     attention_mask=attention_mask,
+                     token_type_ids=token_type_ids,
+                 )
+             else:
+                 model_output = model(
+                     input_ids=kwargs["input_ids_" + str(i)],
+                     attention_mask=kwargs["attention_mask_" + str(i)],
+                     token_type_ids=kwargs.get("token_type_ids_" + str(i)),
+                 )
              pooled_output = model_output[0][:, 0]
              pooled_output = F.normalize(pooled_output, p=2, dim=-1)
              embeddings.append(pooled_output)

          return torch.cat(embeddings, dim=-1)

-     def load_weights_from_automodels(
-         self, in_models: List[str], has_pooling_layer: List[bool]
-     ):
-         model_list = []
-         for i, model_name in enumerate(in_models):
-             model = AutoModel.from_pretrained(
-                 model_name,
-                 add_pooling_layer=has_pooling_layer[i],
-                 trust_remote_code=True,
-             )
-             model.eval()
-             model_list.append(model)
-
-         self.model = nn.ModuleDict(
-             {f"model_{i}": model for i, model in enumerate(model_list)}
-         )
+     def save_pretrained(self, save_directory):
+         for i, model in enumerate(self.models):
+             path = os.path.join(save_directory, f"model_{i}")
+             model.save_pretrained(path)
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+         models = [
+             AutoModel.from_pretrained(f"{pretrained_model_name_or_path}/model_0", trust_remote_code=True),
+             AutoModel.from_pretrained(f"{pretrained_model_name_or_path}/model_1", trust_remote_code=True)
+         ]
+         return cls(models)
+
+     def __repr__(self):
+         s = "ConcatModel with models:"
+         for i, model in enumerate(self.models):
+             s += f"\nModel {i}: {model}"
+         return s
+
+     def eval(self):
+         for model in self.models:
+             model.eval()
+         return self
+
+     def cuda(self):
+         for i, model in enumerate(self.models):
+             self.models[i] = model.cuda()
+         return self
+
+ class ConcatTokenizer(PreTrainedTokenizer):
+     """
+     A custom tokenizer to handle multiple tokenizers for concatenated models.
+     This tokenizer will delegate tokenization to the underlying individual tokenizers.
+     """
+
+     def __init__(self, tokenizers, **kwargs):
+         self.tokenizers = tokenizers
+
+     def tokenize(self, text: str, **kwargs):
+         """
+         Tokenizes text using all tokenizers.
+         """
+         return [tokenizer.tokenize(text, **kwargs) for tokenizer in self.tokenizers]
+
+     def __call__(self, text, **kwargs):
+         """
+         Tokenize and encode input text using all tokenizers.
+         Returns combined inputs.
+         """
+         combined_inputs = {}
+         for i, tokenizer in enumerate(self.tokenizers):
+             encoded = tokenizer(text, **kwargs)
+             # Prefix the keys to distinguish between tokenizers
+             for key, value in encoded.items():
+                 _key = key
+                 if i > 0:
+                     _key = f"{key}_{i}"
+                 combined_inputs[_key] = value
+
+         return combined_inputs
+
+     def batch_encode_plus(self, batch_text_or_text_pairs, **kwargs):
+         """
+         Handles batch tokenization for all tokenizers.
+         """
+         combined_inputs = {}
+         for i, tokenizer in enumerate(self.tokenizers):
+             encoded_batch = tokenizer.batch_encode_plus(batch_text_or_text_pairs, **kwargs)
+             for key, value in encoded_batch.items():
+                 _key = key
+                 if i > 0:
+                     _key = f"{key}_{i}"
+                 combined_inputs[_key] = value
+
+         return combined_inputs
+
+     def decode(self, token_ids, **kwargs):
+         """
+         Decode tokens using the first tokenizer (or specific one, if required).
+         """
+         # Choose the primary tokenizer for decoding (default: model_0)
+         return self.tokenizers[0].decode(token_ids, **kwargs)
+
+     def save_pretrained(self, save_directory):
+         """
+         Save the tokenizers to the specified directory.
+         """
+         for i, tokenizer in enumerate(self.tokenizers):
+             path = os.path.join(save_directory, f"model_{i}")
+             tokenizer.save_pretrained(path)
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+         """
+         Load the tokenizers from the specified directory.
+         """
+         tokenizers = [
+             XLMRobertaTokenizerFast.from_pretrained(f"{pretrained_model_name_or_path}/model_0"),
+             BertTokenizer.from_pretrained(f"{pretrained_model_name_or_path}/model_1")
+         ]
+         return cls(tokenizers)
+
+     def __repr__(self):
+         s = "ConcatTokenizer with tokenizers:"
+         for i, tokenizer in enumerate(self.tokenizers):
+             s += f"\nTokenizer {i}: {tokenizer}"
+         return s
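
A minimal usage sketch (not part of the commit) of how the rewritten ConcatModel and ConcatTokenizer fit together: ConcatTokenizer suffixes the second tokenizer's output keys with "_1" (input_ids_1, attention_mask_1, ...), and forward() routes each set of inputs to its sub-model, takes the first-token embedding, L2-normalizes it, and concatenates the results (1024 + 384 = 1408 dimensions per the two configs above). The local path and call parameters below are assumptions, mirroring save_safetensors.py further down:

import torch
from temp.modeling_jina_v3_arctic_s import ConcatModel, ConcatTokenizer

# Assumes the exported checkpoint lives under "temp/" with model_0/ and model_1/ subfolders.
model = ConcatModel.from_pretrained("temp").eval()
tokenizer = ConcatTokenizer.from_pretrained("temp")

texts = ["What is the capital of France?", "Paris is the capital of France."]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    embeddings = model(**batch)

print(embeddings.shape)  # expected: (2, 1408)
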
pyproject.toml CHANGED
@@ -22,4 +22,7 @@ torch = "2.5.0"
  einops = "0.8.0"
  numpy = "1.26.4"
  setuptools = "75.6.0"
- wheel = "0.45.1"
+ wheel = "0.45.1"
+ tiktoken = "0.8.0"
+ protobuf = "5.29.0"
+ sentencepiece = "0.2.0"
save_safetensors.py CHANGED
@@ -1,16 +1,20 @@
- from transformers import BertTokenizer
- from modeling_jina_v3_arctic_s import ConcatModel, ConcatModelConfig
+ from transformers import AutoModel, BertTokenizer, XLMRobertaTokenizerFast
+ from temp.modeling_jina_v3_arctic_s import ConcatModel, ConcatTokenizer

- config = ConcatModelConfig()
- model = ConcatModel(config)
- model.load_weights_from_automodels(
-     in_models=['jinaai/jina-embeddings-v3', 'Snowflake/snowflake-arctic-embed-s'],
-     has_pooling_layer=[True, True]
- )
+
+ models = [
+     AutoModel.from_pretrained("jinaai/jina-embeddings-v3", trust_remote_code=True),
+     AutoModel.from_pretrained("Snowflake/snowflake-arctic-embed-s", trust_remote_code=True)
+ ]
+ model = ConcatModel(models)

- tokenizer = BertTokenizer(vocab_file='vocab.txt')
+ tokenizers = [
+     XLMRobertaTokenizerFast.from_pretrained("jinaai/jina-embeddings-v3"),
+     BertTokenizer.from_pretrained("Snowflake/snowflake-arctic-embed-s")
+ ]
+ tokenizer = ConcatTokenizer(tokenizers)

- output_path = 'model'
+ output_path = 'temp'
  model.save_pretrained(output_path)
  tokenizer.save_pretrained(output_path)
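
Running the updated script should produce, under its output directory, the same per-model files that this commit adds at the repository root (model_0/, model_1/). A small sanity-check sketch; the filenames are taken from the diff above and the output directory "temp" from the script:

import os

# Files added in this commit for each sub-model folder.
expected = {
    "model_0": ["config.json", "model.safetensors", "special_tokens_map.json",
                "tokenizer.json", "tokenizer_config.json"],
    "model_1": ["config.json", "model.safetensors", "special_tokens_map.json",
                "tokenizer_config.json", "vocab.txt"],
}

for subdir, names in expected.items():
    for name in names:
        path = os.path.join("temp", subdir, name)
        print(path, "OK" if os.path.exists(path) else "MISSING")
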