Commit 2c9b2dc
1 parent: 41aea3d
Initial commit

Files changed:
- .gitattributes +1 -0
- .gitignore +4 -0
- model_0/config.json +50 -0
- model_0/model.safetensors +3 -0
- model_0/special_tokens_map.json +51 -0
- model_0/tokenizer.json +3 -0
- model_0/tokenizer_config.json +54 -0
- model_1/config.json +25 -0
- model_1/model.safetensors +3 -0
- model_1/special_tokens_map.json +37 -0
- model_1/tokenizer_config.json +64 -0
- model_1/vocab.txt +0 -0
- modeling_mgte_arctic_s.py +177 -0
- pyproject.toml +21 -0
- save_safetensors.py +21 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model_0/tokenizer.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+.env
+poetry.lock
+**/__pycache__/
+temp/
model_0/config.json ADDED
@@ -0,0 +1,50 @@
+{
+  "_name_or_path": "Alibaba-NLP/gte-multilingual-base",
+  "architectures": [
+    "NewModel"
+  ],
+  "attention_probs_dropout_prob": 0.0,
+  "auto_map": {
+    "AutoConfig": "Alibaba-NLP/new-impl--configuration.NewConfig",
+    "AutoModel": "Alibaba-NLP/new-impl--modeling.NewModel",
+    "AutoModelForMaskedLM": "Alibaba-NLP/new-impl--modeling.NewForMaskedLM",
+    "AutoModelForMultipleChoice": "Alibaba-NLP/new-impl--modeling.NewForMultipleChoice",
+    "AutoModelForQuestionAnswering": "Alibaba-NLP/new-impl--modeling.NewForQuestionAnswering",
+    "AutoModelForSequenceClassification": "Alibaba-NLP/new-impl--modeling.NewForSequenceClassification",
+    "AutoModelForTokenClassification": "Alibaba-NLP/new-impl--modeling.NewForTokenClassification"
+  },
+  "classifier_dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "layer_norm_type": "layer_norm",
+  "logn_attention_clip1": false,
+  "logn_attention_scale": false,
+  "max_position_embeddings": 8192,
+  "model_type": "new",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pack_qkv": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "rope",
+  "rope_scaling": {
+    "factor": 8.0,
+    "type": "ntk"
+  },
+  "rope_theta": 20000,
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 1,
+  "unpad_inputs": false,
+  "use_memory_efficient_attention": false,
+  "vocab_size": 250048
+}
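Note that the auto_map above resolves to remote code from Alibaba-NLP/new-impl, so model_0 cannot be loaded with stock transformers classes alone. A minimal sketch (not part of the commit) of loading this subfolder standalone, assuming the repository has been cloned locally:

from transformers import AutoModel, AutoTokenizer

# model_0 ships the custom "new" architecture; trust_remote_code=True lets
# transformers fetch NewConfig/NewModel via the auto_map entries above.
tokenizer = AutoTokenizer.from_pretrained("model_0")
model = AutoModel.from_pretrained("model_0", trust_remote_code=True)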
model_0/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c5b9411237518e546ca2e4159b79328a76af59bee7351e250227fd9b4924e93a
+size 1221487872
model_0/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
model_0/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a56def25aa40facc030ea8b0b87f3688e4b3c39eb8b45d5702b3a1300fe2a20
+size 17082734
model_0/tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 32768,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
model_1/config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "Snowflake/snowflake-arctic-embed-s",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.46.3",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
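In contrast to model_0, this is a stock BERT configuration (hidden_size 384), so the subfolder loads without remote code. A minimal sketch, again assuming a local clone:

from transformers import AutoModel, AutoTokenizer

# model_1 is a plain BertModel; no trust_remote_code is needed.
tokenizer = AutoTokenizer.from_pretrained("model_1")
model = AutoModel.from_pretrained("model_1")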
model_1/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:440cc6f580c0ef00c2c9433010145fecab69c3637322d674915ef3f120ac23e3
+size 133462128
model_1/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
model_1/tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "max_length": 512,
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
model_1/vocab.txt ADDED
The diff for this file is too large to render.
modeling_mgte_arctic_s.py ADDED
@@ -0,0 +1,177 @@
+import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from transformers import BertModel, PreTrainedModel, BertConfig, PretrainedConfig, XLMRobertaTokenizerFast, \
+    AutoModel, PreTrainedTokenizerFast, BertTokenizer, PreTrainedTokenizer, PreTrainedTokenizerBase, AutoTokenizer, XLMRobertaTokenizer
+from typing import *
+
+
+class ConcatModelConfig(PretrainedConfig):
+    model_type = "mgte-arctic-s"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+
+class ConcatModel(PreTrainedModel):
+    config_class = ConcatModelConfig
+
+    def __init__(self, models):
+        super().__init__(ConcatModelConfig())
+        self.models = models
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        attention_mask: torch.Tensor,
+        token_type_ids: torch.Tensor = None,
+        **kwargs
+    ) -> torch.Tensor:
+        embeddings = []
+        for i, model in enumerate(self.models):
+            if i == 0:
+                model_output = model(
+                    input_ids=input_ids,
+                    attention_mask=attention_mask,
+                    token_type_ids=token_type_ids,
+                )
+            else:
+                model_output = model(
+                    input_ids=kwargs["input_ids_" + str(i)],
+                    attention_mask=kwargs["attention_mask_" + str(i)],
+                    token_type_ids=kwargs.get("token_type_ids_" + str(i)),
+                )
+            pooled_output = model_output[0][:, 0]
+            pooled_output = F.normalize(pooled_output, p=2, dim=-1)
+            embeddings.append(pooled_output)
+
+        return torch.cat(embeddings, dim=-1)
+
+    def save_pretrained(self, save_directory):
+        for i, model in enumerate(self.models):
+            path = os.path.join(save_directory, f"model_{i}")
+            model.save_pretrained(path)
+
+    # @classmethod
+    # def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+    #     print("ConcatModel from_pretrained")
+    #     print(pretrained_model_name_or_path)
+    #     x = super().from_pretrained(pretrained_model_name_or_path)
+    #     print(x)
+    #     models = []
+    #     return cls(models)
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, model_id="model_0", **kwargs):
+        # Identify the subfolder for the model
+        model_path = f"{pretrained_model_name_or_path}/{model_id}"
+        print("---- model_path:", model_path)
+
+        # Load model configuration
+        config = cls.config_class.from_pretrained(model_path, **kwargs)
+        print("---- config:", config)
+
+        # Initialize the model
+        model = cls(config, **kwargs)
+
+        # Load weights
+        state_dict = torch.load(f"{model_path}/pytorch_model.bin", map_location="cpu")
+        model.load_state_dict(state_dict)
+
+        return model
+
+    def __repr__(self):
+        s = "ConcatModel with models:"
+        for i, model in enumerate(self.models):
+            s += f"\nModel {i}: {model}"
+        return s
+
+    def eval(self):
+        for model in self.models:
+            model.eval()
+        return self
+
+    def cuda(self):
+        for i, model in enumerate(self.models):
+            self.models[i] = model.cuda()
+        return self
+
+class ConcatTokenizer(PreTrainedTokenizer):
+    """
+    A custom tokenizer to handle multiple tokenizers for concatenated models.
+    This tokenizer will delegate tokenization to the underlying individual tokenizers.
+    """
+
+    def __init__(self, tokenizers, **kwargs):
+        self.tokenizers = tokenizers
+
+    def tokenize(self, text: str, **kwargs):
+        """
+        Tokenizes text using all tokenizers.
+        """
+        return [tokenizer.tokenize(text, **kwargs) for tokenizer in self.tokenizers]
+
+    def __call__(self, text, **kwargs):
+        """
+        Tokenize and encode input text using all tokenizers.
+        Returns combined inputs.
+        """
+        combined_inputs = {}
+        for i, tokenizer in enumerate(self.tokenizers):
+            encoded = tokenizer(text, **kwargs)
+            # Prefix the keys to distinguish between tokenizers
+            for key, value in encoded.items():
+                _key = key
+                if i > 0:
+                    _key = f"{key}_{i}"
+                combined_inputs[_key] = value
+
+        return combined_inputs
+
+    def batch_encode_plus(self, batch_text_or_text_pairs, **kwargs):
+        """
+        Handles batch tokenization for all tokenizers.
+        """
+        combined_inputs = {}
+        for i, tokenizer in enumerate(self.tokenizers):
+            encoded_batch = tokenizer.batch_encode_plus(batch_text_or_text_pairs, **kwargs)
+            for key, value in encoded_batch.items():
+                _key = key
+                if i > 0:
+                    _key = f"{key}_{i}"
+                combined_inputs[_key] = value
+
+        return combined_inputs
+
+    def decode(self, token_ids, **kwargs):
+        """
+        Decode tokens using the first tokenizer (or specific one, if required).
+        """
+        # Choose the primary tokenizer for decoding (default: model_0)
+        return self.tokenizers[0].decode(token_ids, **kwargs)
+
+    def save_pretrained(self, save_directory):
+        """
+        Save the tokenizers to the specified directory.
+        """
+        for i, tokenizer in enumerate(self.tokenizers):
+            path = os.path.join(save_directory, f"model_{i}")
+            tokenizer.save_pretrained(path)
+
+    @classmethod
+    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
+        """
+        Load the tokenizers from the specified directory.
+        """
+        tokenizers = [
+            XLMRobertaTokenizerFast.from_pretrained(f"{pretrained_model_name_or_path}/model_0"),
+            BertTokenizer.from_pretrained(f"{pretrained_model_name_or_path}/model_1")
+        ]
+        return cls(tokenizers)
+
+    def __repr__(self):
+        s = "ConcatTokenizer with tokenizers:"
+        for i, tokenizer in enumerate(self.tokenizers):
+            s += f"\nTokenizer {i}: {tokenizer}"
+        return s
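A minimal usage sketch for the two classes above (not part of the commit): ConcatTokenizer suffixes the second tokenizer's keys with _1, which ConcatModel.forward picks up via **kwargs; the output concatenates the two L2-normalized CLS embeddings, 768 (gte-multilingual-base) + 384 (snowflake-arctic-embed-s) = 1152 dimensions.

import torch
from transformers import AutoModel, BertTokenizer, XLMRobertaTokenizerFast
from modeling_mgte_arctic_s import ConcatModel, ConcatTokenizer

model = ConcatModel([
    AutoModel.from_pretrained("Alibaba-NLP/gte-multilingual-base", trust_remote_code=True),
    AutoModel.from_pretrained("Snowflake/snowflake-arctic-embed-s"),
]).eval()
tokenizer = ConcatTokenizer([
    XLMRobertaTokenizerFast.from_pretrained("Alibaba-NLP/gte-multilingual-base"),
    BertTokenizer.from_pretrained("Snowflake/snowflake-arctic-embed-s"),
])

# Each tokenizer encodes the same text; the second tokenizer's keys get a _1 suffix.
inputs = tokenizer(["What is the capital of France?"], padding=True, return_tensors="pt")
with torch.no_grad():
    embeddings = model(**inputs)  # shape (1, 1152): 768 (gte) + 384 (arctic)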
pyproject.toml ADDED
@@ -0,0 +1,21 @@
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry]
+name = "mgte-arctic-s"
+version = "1.0.0"
+description = "Upload ConcatModels."
+authors = [
+    "Michael Dinzinger"
+]
+homepage = "https://www.fim.uni-passau.de"
+repository = "https://www.fim.uni-passau.de"
+readme = "README.md"
+license = "MIT"
+package-mode = false
+
+[tool.poetry.dependencies]
+python = ">=3.10,<3.12"
+transformers = "4.46.3"
+torch = "2.5.0"
save_safetensors.py ADDED
@@ -0,0 +1,21 @@
+from transformers import AutoModel, BertTokenizer, XLMRobertaTokenizerFast
+from modeling_mgte_arctic_s import ConcatModel, ConcatTokenizer
+
+
+models = [
+    AutoModel.from_pretrained("Alibaba-NLP/gte-multilingual-base", trust_remote_code=True),
+    AutoModel.from_pretrained("Snowflake/snowflake-arctic-embed-s", trust_remote_code=True)
+]
+model = ConcatModel(models)
+
+tokenizers = [
+    XLMRobertaTokenizerFast.from_pretrained("Alibaba-NLP/gte-multilingual-base"),
+    BertTokenizer.from_pretrained("Snowflake/snowflake-arctic-embed-s")
+]
+tokenizer = ConcatTokenizer(tokenizers)
+
+output_path = 'temp'
+model.save_pretrained(output_path)
+tokenizer.save_pretrained(output_path)
+
+print(f'Model saved as {output_path}')
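One caveat worth noting: ConcatModel.from_pretrained as committed expects a single pytorch_model.bin and calls cls(config, **kwargs), which matches neither __init__(models) nor the per-submodel layout that save_pretrained writes. A load-back sketch that mirrors the save path above instead:

from transformers import AutoModel
from modeling_mgte_arctic_s import ConcatModel, ConcatTokenizer

# Reload the two submodels from the directories save_pretrained created.
model = ConcatModel([
    AutoModel.from_pretrained("temp/model_0", trust_remote_code=True),
    AutoModel.from_pretrained("temp/model_1"),
])
tokenizer = ConcatTokenizer.from_pretrained("temp")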