Upload 22 files
- README.md +272 -0
- adapter_config (1).json +21 -0
- added_tokens.json +3 -0
- events.out.tfevents.1722267239.80c456c5842f.227.0 +3 -0
- events.out.tfevents.1722267421.80c456c5842f.227.1 +3 -0
- events.out.tfevents.1722267452.80c456c5842f.227.2 +3 -0
- events.out.tfevents.1722267484.80c456c5842f.227.3 +3 -0
- events.out.tfevents.1722267518.80c456c5842f.227.4 +3 -0
- events.out.tfevents.1722267551.80c456c5842f.227.5 +3 -0
- events.out.tfevents.1722267584.80c456c5842f.227.6 +3 -0
- events.out.tfevents.1722267620.80c456c5842f.227.7 +3 -0
- events.out.tfevents.1722267654.80c456c5842f.227.8 +3 -0
- events.out.tfevents.1722267690.80c456c5842f.227.9 +3 -0
- events.out.tfevents.1722267724.80c456c5842f.227.10 +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +32 -0
- trainer_state.json +22 -0
- training_args.bin +3 -0
    	
README.md
ADDED
@@ -0,0 +1,272 @@
---
library_name: peft
---
## Training procedure

The following `bitsandbytes` quantization config was used during training:
- load_in_8bit: False
- load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: False
- bnb_4bit_compute_dtype: float16

### Framework versions

- PEFT 0.4.0
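The README describes an NF4 4-bit quantization setup. As a minimal, hypothetical sketch (not taken from this repo's training code), the listed values map onto a `transformers.BitsAndBytesConfig` like this; the base model name is taken from `adapter_config (1).json` below:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the config listed in the README: 4-bit NF4 weights, fp16 compute,
# no double quantization, default llm_int8_threshold of 6.0.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
    llm_int8_threshold=6.0,
)

model = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Llama-2-7b-hf",  # base model per adapter_config (1).json
    quantization_config=bnb_config,
)
```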
    	
adapter_config (1).json
ADDED
@@ -0,0 +1,21 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "NousResearch/Llama-2-7b-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 16,
  "lora_dropout": 0.1,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
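This is a standard LoRA adapter config. A hedged sketch of how an equivalent `peft.LoraConfig` could be built and attached to the quantized base model (variable names are illustrative, not from this repo):

```python
from peft import LoraConfig, get_peft_model

# Same hyperparameters as adapter_config (1).json:
# rank 64, alpha 16, dropout 0.1, LoRA on the attention q/v projections only.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

peft_model = get_peft_model(model, lora_config)  # `model` from the sketch above
peft_model.print_trainable_parameters()
```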
    	
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
{
  "[PAD]": 32000
}
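`added_tokens.json` maps a new `[PAD]` token to id 32000, one past Llama-2's base vocabulary (ids 0-31999). A sketch of the usual workflow for adding such a token (an assumed pattern, not taken from this repo's code):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
num_added = tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print(num_added, tokenizer.pad_token_id)  # 1, 32000

# The model's embedding matrix must then grow to cover the new id:
# model.resize_token_embeddings(len(tokenizer))
```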
    	
events.out.tfevents.1722267239.80c456c5842f.227.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ca4f74b3b549bab3289542aacef46b49070444193008ee98724376f1b70d633
size 4870
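The TensorBoard event files (and the other binary artifacts below) are stored as Git LFS pointers rather than raw bytes. A small sketch that parses the three-line pointer format shown here:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a git-lfs pointer file into {'version', 'oid', 'size'}."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # split on the first space
        fields[key] = value
    fields["size"] = int(fields["size"])
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1ca4f74b3b549bab3289542aacef46b49070444193008ee98724376f1b70d633
size 4870"""
print(parse_lfs_pointer(pointer))
```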
    	
events.out.tfevents.1722267421.80c456c5842f.227.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1255641916b7e3cd0ebbb068dc2c6f483fbf3bfda4be2965ffac367a9b3999e3
size 4870
    	
events.out.tfevents.1722267452.80c456c5842f.227.2
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:330268798579818e8857346e0fffd88ebd75e1a4126820fea2dae404f903a021
size 4870
    	
events.out.tfevents.1722267484.80c456c5842f.227.3
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:755384a204abf027f11865b0c3929698a6410b4b01db5b19c0eba99e667dea82
size 4870
    	
events.out.tfevents.1722267518.80c456c5842f.227.4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0276d336a75e3bd1a64ba38658819755f68e13ddee48ef07499b7387a6d1d161
size 4870
    	
events.out.tfevents.1722267551.80c456c5842f.227.5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3251b8fb5490bdc05da3175f211c309640aa91ef9c5ef6bfdbfb89fd31e98366
size 4870
    	
events.out.tfevents.1722267584.80c456c5842f.227.6
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3566efdef95ea5044f7fbab573241dc224a10a52dbbcb3a16004d48b5c9846e
size 4870
    	
events.out.tfevents.1722267620.80c456c5842f.227.7
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e29793623dc40b74d749e1c462e9bf52987a4f37fbd2000b3f2513f05777f483
size 4870
    	
events.out.tfevents.1722267654.80c456c5842f.227.8
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f019f8b7540ed35252dcf042a366d0e99db509ddb672909e87073a908e117c1
size 4870
    	
events.out.tfevents.1722267690.80c456c5842f.227.9
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a32e0d06e654b8c24cc8b38aa0b4a99d1c1bfdc6ed38e71b3fc016a93da4f24
size 4870
    	
events.out.tfevents.1722267724.80c456c5842f.227.10
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4480edc8726627fa108f0e4e3439479b62b15e1864f6f481c97475d271e11e5d
size 4870
    	
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35eef56506369582ae2d17896acf38810c628af294bd5a59628c5a37bee5b18c
size 14244
    	
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e93866edad3fbdd47247cb07511e76176c859f76e1587ddf720e22782b6ae462
size 1064
    	
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
    	
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
    	
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
    	
tokenizer_config.json
ADDED
@@ -0,0 +1,32 @@
{
  "bos_token": {
    "__type": "AddedToken",
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
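The odd-looking `model_max_length` is not arbitrary: it is the sentinel transformers uses when no maximum length is configured, which is `int(1e30)` after float rounding:

```python
# The sentinel value seen in tokenizer_config.json above.
print(int(1e30))  # 1000000000000000019884624838656
```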
    	
trainer_state.json
ADDED
@@ -0,0 +1,22 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8928571428571429,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.89,
      "learning_rate": 0.0002,
      "loss": 0.2144,
      "step": 25
    }
  ],
  "max_steps": 28,
  "num_train_epochs": 1,
  "total_flos": 99800115118080.0,
  "trial_name": null,
  "trial_params": null
}
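The trainer state is internally consistent: 25 optimizer steps out of a 28-step schedule gives the reported fractional epoch. A quick check:

```python
global_step, max_steps = 25, 28
epoch = global_step / max_steps
print(epoch)  # 0.8928571428571429, matching "epoch" in trainer_state.json
```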
    	
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a7d61465862e376be269103c031e3985fe834c7fbe2e5b50fdcd4088c928d69
size 4408