Upload folder using huggingface_hub

Files changed:
- README.md +46 -0
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- added_tokens.json +5 -0
- merges.txt +0 -0
- runs/Aug16_12-51-57_11e19563ecdf/events.out.tfevents.1723812784.11e19563ecdf.4265.0 +2 -2
- special_tokens_map.json +20 -0
- tokenizer.json +0 -0
- tokenizer_config.json +43 -0
- training_args.bin +3 -0
- training_params.json +48 -0
- vocab.json +0 -0
    	
README.md ADDED
@@ -0,0 +1,46 @@
+---
+tags:
+- autotrain
+- text-generation-inference
+- text-generation
+- peft
+library_name: transformers
+base_model: Qwen/Qwen2-7B-Instruct
+widget:
+  - messages:
+      - role: user
+        content: What is your favorite condiment?
+license: other
+---
+
+# Model Trained Using AutoTrain
+
+This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain).
+
+# Usage
+
+```python
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_path = "PATH_TO_THIS_REPO"
+
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+model = AutoModelForCausalLM.from_pretrained(
+    model_path,
+    device_map="auto",
+    torch_dtype='auto'
+).eval()
+
+# Prompt content: "hi"
+messages = [
+    {"role": "user", "content": "hi"}
+]
+
+input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt')
+output_ids = model.generate(input_ids.to('cuda'))
+response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
+
+# Model response: "Hello! How can I assist you today?"
+print(response)
+```
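Note: per adapter_config.json below, this repo holds a PEFT (LoRA) adapter rather than merged weights, so the README's `AutoModelForCausalLM.from_pretrained(model_path)` call only applies once the adapter is merged into the base model. A minimal sketch for loading the adapter directly, assuming the `peft` library is installed (the repo placeholder follows the README's convention):

```python
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

model_path = "PATH_TO_THIS_REPO"  # placeholder, as in the README

# Reads adapter_config.json, downloads the base model it names, and attaches
# the LoRA weights on top. The 4-bit base it points at needs bitsandbytes.
model = AutoPeftModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    torch_dtype="auto",
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path)
```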
    	
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "unsloth/qwen2-7b-instruct-bnb-4bit",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "down_proj",
+    "o_proj",
+    "q_proj",
+    "v_proj",
+    "gate_proj",
+    "up_proj",
+    "k_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
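Two details worth flagging: `base_model_name_or_path` points at the unsloth 4-bit variant (consistent with `"unsloth": true` and `"quantization": "int4"` in training_params.json below) rather than the `Qwen/Qwen2-7B-Instruct` id the README lists, and `target_modules` names all seven linear projections, the expansion of AutoTrain's `"all-linear"` setting. A sketch for inspecting the config programmatically, assuming `peft` and the README's repo placeholder:

```python
from peft import PeftConfig

cfg = PeftConfig.from_pretrained("PATH_TO_THIS_REPO")  # placeholder repo id
print(cfg.base_model_name_or_path)  # unsloth/qwen2-7b-instruct-bnb-4bit
print(cfg.r, cfg.lora_alpha)        # 16 32
print(cfg.lora_alpha / cfg.r)       # 2.0, the LoRA scaling applied at inference
```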
    	
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:600253a626882177e1d650622d05ae469919ee10f33fcc96db8933eeb8681e53
+size 161533192
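The adapter weights are stored through Git LFS, so the diff records only the three-line pointer file (version, oid, size); the 161533192-byte payload (~154 MiB) lives in LFS storage. A minimal sketch of parsing that pointer format, under the key-value layout shown above:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs pointer file into its key/value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])  # payload size in bytes
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:600253a626882177e1d650622d05ae469919ee10f33fcc96db8933eeb8681e53\n"
    "size 161533192"
)
print(parse_lfs_pointer(pointer)["size"] / 2**20)  # ~154 MiB of adapter weights
```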
    	
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+{
+  "<|endoftext|>": 151643,
+  "<|im_end|>": 151645,
+  "<|im_start|>": 151644
+}
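These are the ChatML control tokens Qwen2 appends to its vocabulary. A quick sketch verifying they resolve to the ids listed above (loading the base tokenizer is an assumption; this repo's tokenizer files define the same mapping):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")  # base assumed
for token in ("<|endoftext|>", "<|im_start|>", "<|im_end|>"):
    print(token, tok.convert_tokens_to_ids(token))
# <|endoftext|> 151643
# <|im_start|> 151644
# <|im_end|> 151645
```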
    	
merges.txt ADDED
(diff too large to render; see the raw file)
    	
runs/Aug16_12-51-57_11e19563ecdf/events.out.tfevents.1723812784.11e19563ecdf.4265.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:c12ca31f074ffa8c3fadc0a1a09f4870dcc9586f2d2b94cd39ab675cfc165bfc
+size 8150
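This is the only pre-existing file the commit touches: the TensorBoard event log for the run, re-uploaded with a new LFS oid and size. Assuming the repo is cloned locally with LFS objects pulled, the logged scalars can be read back like this (the tag names are assumptions):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Aug16_12-51-57_11e19563ecdf")  # path from the diff
ea.Reload()                     # parse the events.out.tfevents file
print(ea.Tags()["scalars"])     # e.g. ["train/loss", "eval/loss"] (assumed names)
```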
    	
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+{
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "eos_token": {
+    "content": "<|im_end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
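The map sets `<|im_end|>` as the end-of-sequence token, which is why generation stops at the end of a ChatML turn, and reuses `<|endoftext|>` for padding. A short sketch of how these surface on a loaded tokenizer (base tokenizer assumed, as above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")  # base assumed
print(tok.eos_token, tok.eos_token_id)  # <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|> 151643
```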
    	
tokenizer.json ADDED
(diff too large to render; see the raw file)
    	
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "151643": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151644": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "151645": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "model_max_length": 2048,
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
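The `chat_template` above is the standard ChatML template Qwen2 ships with: it injects a default system turn when the conversation doesn't start with one, wraps each message in `<|im_start|>` / `<|im_end|>`, and appends an assistant header when `add_generation_prompt` is set (note also that `model_max_length` is 2048 here, matching training_params.json below). A small sketch of what the template renders, with loading the base tokenizer as an assumption since this repo's files define the same template:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")  # base assumed
text = tok.apply_chat_template(
    [{"role": "user", "content": "hi"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# hi<|im_end|>
# <|im_start|>assistant
```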
    	
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:392b1a1bd5a1d27431bd4aa8e665919b1c7ee5aef8240387b4df38aa4c6a3a6a
+size 5432
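training_args.bin is a pickled `transformers.TrainingArguments` object, hence the LFS pointer. A hedged sketch for inspecting it locally; `weights_only=False` is required on recent PyTorch releases and should only be used on files you trust:

```python
import torch

# Unpickles the TrainingArguments object saved by the trainer (trusted file only).
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)  # expect 0.0002 and 5
```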
    	
training_params.json ADDED
@@ -0,0 +1,48 @@
+{
+    "model": "Qwen/Qwen2-7B-Instruct",
+    "project_name": "qwen2-5-sdgs",
+    "data_path": "/content/data",
+    "train_split": "train",
+    "valid_split": null,
+    "add_eos_token": true,
+    "block_size": 1024,
+    "model_max_length": 2048,
+    "padding": "right",
+    "trainer": "sft",
+    "use_flash_attention_2": false,
+    "log": "tensorboard",
+    "disable_gradient_checkpointing": false,
+    "logging_steps": -1,
+    "eval_strategy": "epoch",
+    "save_total_limit": 1,
+    "auto_find_batch_size": false,
+    "mixed_precision": "fp16",
+    "lr": 0.0002,
+    "epochs": 5,
+    "batch_size": 1,
+    "warmup_ratio": 0.1,
+    "gradient_accumulation": 4,
+    "optimizer": "adamw_torch",
+    "scheduler": "linear",
+    "weight_decay": 0.01,
+    "max_grad_norm": 1.0,
+    "seed": 42,
+    "chat_template": null,
+    "quantization": "int4",
+    "target_modules": "all-linear",
+    "merge_adapter": false,
+    "peft": true,
+    "lora_r": 16,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "model_ref": null,
+    "dpo_beta": 0.1,
+    "max_prompt_length": 128,
+    "max_completion_length": null,
+    "prompt_text_column": null,
+    "text_column": "text",
+    "rejected_text_column": null,
+    "push_to_hub": true,
+    "username": "Punthon",
+    "unsloth": true
+}
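A few quantities follow directly from these settings: the effective batch size is `batch_size * gradient_accumulation = 4`, warmup covers 10% of optimizer steps, and the LoRA scaling is `lora_alpha / lora_r = 2`. A sketch, assuming training_params.json sits in the working directory:

```python
import json

with open("training_params.json") as f:
    p = json.load(f)

effective_batch = p["batch_size"] * p["gradient_accumulation"]  # 1 * 4 = 4
warmup_fraction = p["warmup_ratio"]                             # 0.1 of steps
lora_scaling = p["lora_alpha"] / p["lora_r"]                    # 32 / 16 = 2.0
print(effective_batch, warmup_fraction, lora_scaling)
```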
    	
vocab.json ADDED
(diff too large to render; see the raw file)
