Add SetFit model
Browse files
- 1_Pooling/config.json +3 -3
- README.md +4 -4
- config.json +19 -12
- config_sentence_transformers.json +3 -3
- model.safetensors +2 -2
- modules.json +6 -0
- sentence_bert_config.json +1 -1
- special_tokens_map.json +5 -19
- tokenizer.json +0 -0
- tokenizer_config.json +15 -17
- vocab.txt +0 -5
    	
        1_Pooling/config.json
    CHANGED
    
@@ -1,7 +1,7 @@
 {
-  "word_embedding_dimension": …
-  "pooling_mode_cls_token": …
-  "pooling_mode_mean_tokens": …
+  "word_embedding_dimension": 384,
+  "pooling_mode_cls_token": true,
+  "pooling_mode_mean_tokens": false,
   "pooling_mode_max_tokens": false,
   "pooling_mode_mean_sqrt_len_tokens": false
 }
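
The new pooling config switches the body to CLS-token pooling over 384-dimensional embeddings. As a minimal sketch (not code from this repo), this is how sentence-transformers materializes the committed config as a Pooling module:

    from sentence_transformers.models import Pooling

    # Values mirror the committed 1_Pooling/config.json.
    pooling = Pooling(
        word_embedding_dimension=384,
        pooling_mode_cls_token=True,
        pooling_mode_mean_tokens=False,
        pooling_mode_max_tokens=False,
        pooling_mode_mean_sqrt_len_tokens=False,
    )
    print(pooling.get_sentence_embedding_dimension())  # 384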
    	
        README.md
    CHANGED
    
@@ -5,7 +5,7 @@ tags:
 - sentence-transformers
 - text-classification
 - generated_from_setfit_trainer
-base_model: …
+base_model: BAAI/bge-small-en-v1.5
 metrics:
 - accuracy
 widget:
@@ -23,9 +23,9 @@ pipeline_tag: text-classification
 inference: false
 ---
 
-# SetFit with …
+# SetFit with BAAI/bge-small-en-v1.5
 
-This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses […
+This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) as the Sentence Transformer embedding model. A MultiOutputClassifier instance is used for classification.
 
 The model has been trained using an efficient few-shot learning technique that involves:
@@ -36,7 +36,7 @@ The model has been trained using an efficient few-shot learning technique that i
 
 ### Model Description
 - **Model Type:** SetFit
-- **Sentence Transformer body:** […
+- **Sentence Transformer body:** [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5)
 - **Classification head:** a MultiOutputClassifier instance
 - **Maximum Sequence Length:** 512 tokens
 - **Number of Classes:** 5 classes
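
For reference, a minimal sketch of loading and running the uploaded checkpoint with the setfit library; the repo id below is a placeholder, since the commit does not name the repository:

    from setfit import SetFitModel

    # "user/setfit-bge-small-en-v1.5" is a hypothetical repo id.
    model = SetFitModel.from_pretrained("user/setfit-bge-small-en-v1.5")

    # With a MultiOutputClassifier head and 5 classes, predict() typically
    # returns one binary indicator per class for each input text.
    preds = model.predict(["example input text"])
    print(preds)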
    	
        config.json
    CHANGED
    
@@ -1,24 +1,31 @@
 {
-  "_name_or_path": "/Users/bot/.cache/torch/sentence_transformers/…
+  "_name_or_path": "/Users/bot/.cache/torch/sentence_transformers/BAAI_bge-small-en-v1.5/",
   "architectures": [
-    "…
+    "BertModel"
   ],
   "attention_probs_dropout_prob": 0.1,
-  "…
-  "eos_token_id": 2,
+  "classifier_dropout": null,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size": …
+  "hidden_size": 384,
+  "id2label": {
+    "0": "LABEL_0"
+  },
   "initializer_range": 0.02,
-  "intermediate_size": …
-  "…
-  …
-  …
+  "intermediate_size": 1536,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": …
-  "…
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
   "torch_dtype": "float32",
   "transformers_version": "4.40.2",
-  "…
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
 }
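
A quick sanity-check sketch that the committed architecture numbers match the upstream base model; the asserted values are taken from the diff above:

    from transformers import AutoConfig

    cfg = AutoConfig.from_pretrained("BAAI/bge-small-en-v1.5")
    assert cfg.model_type == "bert"
    assert cfg.hidden_size == 384 and cfg.intermediate_size == 1536
    assert cfg.num_hidden_layers == 12 and cfg.num_attention_heads == 12
    assert cfg.vocab_size == 30522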
    	
        config_sentence_transformers.json
    CHANGED
    
@@ -1,7 +1,7 @@
 {
   "__version__": {
-    "sentence_transformers": "2.…
-    "transformers": "4.…
-    "pytorch": "1.…
+    "sentence_transformers": "2.2.2",
+    "transformers": "4.28.1",
+    "pytorch": "1.13.0+cu117"
   }
 }
    	
        model.safetensors
    CHANGED
    
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:…
-size …
+oid sha256:ea1d11a3f23d14fe09fc1826fc7944e89c09a634d2217d57a21dd136805ee3e8
+size 133462128
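
model.safetensors is stored as a git-lfs pointer; a small sketch for verifying a fetched copy against the pointer above (assumes the file has already been pulled locally, e.g. via git lfs pull):

    import hashlib
    from pathlib import Path

    data = Path("model.safetensors").read_bytes()
    assert len(data) == 133462128  # size from the pointer
    assert hashlib.sha256(data).hexdigest() == (
        "ea1d11a3f23d14fe09fc1826fc7944e89c09a634d2217d57a21dd136805ee3e8"
    )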
    	
        modules.json
    CHANGED
    
@@ -10,5 +10,11 @@
     "name": "1",
     "path": "1_Pooling",
     "type": "sentence_transformers.models.Pooling"
+  },
+  {
+    "idx": 2,
+    "name": "2",
+    "path": "2_Normalize",
+    "type": "sentence_transformers.models.Normalize"
   }
 ]
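
The appended Normalize module makes encode() return unit-length vectors, as BGE models expect for cosine similarity. A hand-built sketch of the three-module stack that modules.json now describes; loading the repo with SentenceTransformer(...) assembles the same pipeline automatically:

    from sentence_transformers import SentenceTransformer
    from sentence_transformers.models import Normalize, Pooling, Transformer

    transformer = Transformer("BAAI/bge-small-en-v1.5", max_seq_length=512)
    pooling = Pooling(
        transformer.get_word_embedding_dimension(),  # 384
        pooling_mode_cls_token=True,
        pooling_mode_mean_tokens=False,
    )
    model = SentenceTransformer(modules=[transformer, pooling, Normalize()])

    emb = model.encode(["hello world"])  # shape (1, 384), L2-normalized rows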
    	
        sentence_bert_config.json
    CHANGED
    
@@ -1,4 +1,4 @@
 {
   "max_seq_length": 512,
-  "do_lower_case": …
+  "do_lower_case": true
 }
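
These values feed the Transformer module at load time; after loading, the sequence limit is exposed on the model object. A sketch, using the base model id:

    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer("BAAI/bge-small-en-v1.5")
    print(model.max_seq_length)  # 512, per sentence_bert_config.json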
    	
        special_tokens_map.json
    CHANGED
    
@@ -1,41 +1,27 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
   "cls_token": {
-    "content": "…
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
+    "content": "[CLS]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "mask_token": {
-    "content": "…
-    "lstrip": …
+    "content": "[MASK]",
+    "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "pad_token": {
-    "content": "…
+    "content": "[PAD]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "sep_token": {
-    "content": "…
+    "content": "[SEP]",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
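
The map now carries the five BERT-style special tokens. As a sketch, the same mapping is visible on the loaded base tokenizer:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")
    print(tok.special_tokens_map)
    # {'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]',
    #  'cls_token': '[CLS]', 'mask_token': '[MASK]'}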
    	
        tokenizer.json
    CHANGED
    
The diff for this file is too large to render. See raw diff.
    	
        tokenizer_config.json
    CHANGED
    
@@ -1,59 +1,57 @@
 {
   "added_tokens_decoder": {
     "0": {
-      "content": "…
+      "content": "[PAD]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "…
-      "content": "…
+    "100": {
+      "content": "[UNK]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "…
-      "content": "…
+    "101": {
+      "content": "[CLS]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "…
-      "content": "[…
+    "102": {
+      "content": "[SEP]",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "…
-      "content": "…
-      "lstrip": …
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
-  "cls_token": "…
+  "cls_token": "[CLS]",
   "do_basic_tokenize": true,
   "do_lower_case": true,
-  "…
-  "mask_token": "<mask>",
+  "mask_token": "[MASK]",
   "model_max_length": 512,
   "never_split": null,
-  "pad_token": "…
-  "sep_token": "…
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "strip_accents": null,
   "tokenize_chinese_chars": true,
-  "tokenizer_class": "…
+  "tokenizer_class": "BertTokenizer",
   "unk_token": "[UNK]"
 }
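
The added_tokens_decoder ids above (0, 100, 101, 102, 103) are the standard BERT WordPiece slots; a quick round-trip sketch confirms them:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")
    ids = tok.convert_tokens_to_ids(["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"])
    print(ids)  # [0, 100, 101, 102, 103]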
    	
        vocab.txt
    CHANGED
    
@@ -1,7 +1,3 @@
-<s>
-<pad>
-</s>
-<unk>
 [PAD]
 [unused0]
 [unused1]
@@ -30524,4 +30520,3 @@ necessitated
 ##:
 ##?
 ##~
-<mask>
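
Dropping the four angle-bracket tokens at the head and <mask> at the tail leaves the standard 30522-entry WordPiece vocabulary, matching vocab_size in config.json; a one-line sketch to confirm:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")
    print(tok.vocab_size)  # 30522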
