Upload 4 files
- open_clip_config.json +31 -0
- special_tokens_map.json +7 -0
- tokenizer_config (1).json +15 -0
- vocab.txt +0 -0
open_clip_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "model_cfg": {
+    "embed_dim": 512,
+    "vision_cfg": {
+      "timm_model_name": "vit_base_patch16_224",
+      "timm_model_pretrained": false,
+      "timm_pool": "",
+      "timm_proj": "linear",
+      "image_size": 224
+    },
+    "text_cfg": {
+      "hf_model_name": "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract",
+      "hf_tokenizer_name": "microsoft/BiomedNLP-BiomedBERT-base-uncased-abstract",
+      "hf_proj_type": "mlp",
+      "hf_pooler_type": "cls_last_hidden_state_pooler",
+      "context_length": 256
+    }
+  },
+  "preprocess_cfg": {
+    "mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ]
+  }
+}
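This is the model and preprocessing config that open_clip reads for Hub-hosted checkpoints: a timm ViT-B/16 vision tower and a BiomedBERT text tower projected into a shared 512-dim embedding space, with CLIP's usual normalization stats. A minimal loading sketch, assuming the files in this upload live in a Hub repo; the repo id `your-org/your-biomedclip-repo` and the image path are placeholders, not part of this commit:

```python
import torch
import open_clip
from PIL import Image

# "hf-hub:" tells open_clip to pull open_clip_config.json (and the weights) from the Hub.
# Replace the placeholder repo id with the repository this commit actually belongs to.
model, _, preprocess = open_clip.create_model_and_transforms(
    "hf-hub:your-org/your-biomedclip-repo"
)
tokenizer = open_clip.get_tokenizer("hf-hub:your-org/your-biomedclip-repo")
model.eval()

image = preprocess(Image.open("example_scan.png")).unsqueeze(0)  # placeholder image path
texts = tokenizer(["chest X-ray", "brain MRI"])  # padded/truncated to context_length=256

with torch.no_grad():
    image_features = model.encode_image(image)   # shape (1, 512)
    text_features = model.encode_text(texts)     # shape (2, 512)
    image_features = image_features / image_features.norm(dim=-1, keepdim=True)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
```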
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer_config (1).json
ADDED
@@ -0,0 +1,15 @@
+{
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 1000000000000000019884624838656,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.
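Together, vocab.txt, special_tokens_map.json, and the tokenizer config define a standard uncased BERT (WordPiece) tokenizer, matching the `hf_tokenizer_name` in open_clip_config.json. A quick sketch of loading it directly from the files in this upload, assuming they sit in the working directory and that `tokenizer_config (1).json` is renamed to `tokenizer_config.json` (transformers looks for that exact filename):

```python
from transformers import BertTokenizer

# Loads vocab.txt, tokenizer_config.json and special_tokens_map.json from the local directory.
tokenizer = BertTokenizer.from_pretrained(".")

enc = tokenizer("Chest radiograph shows mild cardiomegaly.")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# e.g. ['[CLS]', 'chest', ..., '[SEP]'] -- lowercased (do_lower_case=true) and framed by the
# [CLS]/[SEP] tokens that the text tower's cls_last_hidden_state_pooler relies on.
```

Note that `model_max_length` here is the huge sentinel value transformers writes when no limit is set; the 256-token limit actually applied at inference comes from `context_length` in open_clip_config.json.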