Training in progress, step 6000, checkpoint
- checkpoint-6000/README.md +34 -0
- checkpoint-6000/adapter_config.json +20 -0
- checkpoint-6000/adapter_model.bin +3 -0
- checkpoint-6000/optimizer.pt +3 -0
- checkpoint-6000/rng_state.pth +3 -0
- checkpoint-6000/scheduler.pt +3 -0
- checkpoint-6000/special_tokens_map.json +11 -0
- checkpoint-6000/tokenizer.json +0 -0
- checkpoint-6000/tokenizer_config.json +72 -0
- checkpoint-6000/trainer_state.json +187 -0
- checkpoint-6000/training_args.bin +3 -0
checkpoint-6000/README.md
ADDED
@@ -0,0 +1,34 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+### Framework versions
+
+- PEFT 0.5.0
+
+- PEFT 0.5.0
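
The README block above lists the arguments PEFT recorded for the 8-bit bitsandbytes setup. Below is a minimal sketch of expressing those same values when loading the base model with `transformers`; the base model name is taken from adapter_config.json further down, everything else mirrors the listed fields.

```python
# Sketch only: reconstructs the quantization setup recorded in the README
# using transformers' BitsAndBytesConfig. Values mirror the fields above;
# the base model name comes from adapter_config.json.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # load_in_8bit: True
    load_in_4bit=False,                      # load_in_4bit: False
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,              # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    bnb_4bit_quant_type="fp4",               # recorded but unused, 4-bit is off
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/polyglot-ko-1.3b",
    quantization_config=bnb_config,
    device_map="auto",
)
```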
checkpoint-6000/adapter_config.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "EleutherAI/polyglot-ko-1.3b",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "revision": null,
+  "target_modules": [
+    "query_key_value"
+  ],
+  "task_type": "CAUSAL_LM"
+}
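
A minimal sketch of attaching this LoRA adapter to its base model with PEFT, assuming the checkpoint directory has been downloaded locally as `checkpoint-6000/` and reusing the quantized `base_model` from the sketch above; the adapter settings themselves come from the config.

```python
# Sketch only: attach the LoRA adapter saved in checkpoint-6000/ to the
# 8-bit base model. Assumes the checkpoint folder is available locally.
from peft import PeftModel

model = PeftModel.from_pretrained(base_model, "checkpoint-6000")
model.eval()

# Per adapter_config.json: r=16, lora_alpha=32, target_modules=["query_key_value"],
# i.e. LoRA updates only the fused QKV projections of the GPT-NeoX blocks.
cfg = model.peft_config["default"]
print(cfg.r, cfg.lora_alpha, cfg.target_modules)
```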
checkpoint-6000/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46ac31236862012b285a689fe282c9fcf24b3f7bbf34613ff5216fea41004d4a
+size 12600958
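
adapter_model.bin (like optimizer.pt, rng_state.pth, scheduler.pt and training_args.bin below) is stored as a Git LFS pointer: the repository keeps only the SHA-256 and size, and the binary is resolved at download time. A hedged sketch of fetching just the adapter weights with `huggingface_hub`; the repo id is a placeholder, not taken from this page.

```python
# Sketch only: fetch the real adapter weights behind the LFS pointer.
# "user/repo" is a placeholder; substitute the actual repository id.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="user/repo",                          # hypothetical repo id
    filename="checkpoint-6000/adapter_model.bin",
    revision="main",
)
print(local_path)  # resolved local file, ~12.6 MB per the pointer's size field
```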
checkpoint-6000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c256a74acff57ba6fc4b956a541a69e510a4b34a68dfae36db196c55dc455977
+size 25206586
checkpoint-6000/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9fbbce3a2e4293cbbbb4aa0e946405958b2c77a900313a3050783d28b1c807e
+size 14308
checkpoint-6000/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fff7934a4185855e6e536ba55543a820079f14811f03d29272f9becf86c89cd8
+size 1064
checkpoint-6000/special_tokens_map.json
ADDED
@@ -0,0 +1,11 @@
+{
+  "additional_special_tokens": [
+    "<|endoftext|>",
+    "<|sep|>",
+    "<|acc|>",
+    "<|tel|>",
+    "<|rrn|>"
+  ],
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>"
+}
checkpoint-6000/tokenizer.json
ADDED
The diff for this file is too large to render.
checkpoint-6000/tokenizer_config.json
ADDED
@@ -0,0 +1,72 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|unused0|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|unused1|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<|sep|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "30000": {
+      "content": "<|acc|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "30001": {
+      "content": "<|tel|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "30002": {
+      "content": "<|rrn|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<|endoftext|>",
+    "<|sep|>",
+    "<|acc|>",
+    "<|tel|>",
+    "<|rrn|>"
+  ],
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "PreTrainedTokenizerFast"
+}
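
A small sketch of loading the tokenizer shipped with the checkpoint and checking the special tokens declared in special_tokens_map.json and tokenizer_config.json above; a local `checkpoint-6000/` path is assumed.

```python
# Sketch only: load the checkpoint's tokenizer and verify the declared
# special tokens. Assumes the checkpoint folder is available locally.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-6000")

print(tokenizer.eos_token, tokenizer.pad_token)  # both "<|endoftext|>"
print(tokenizer.additional_special_tokens)       # <|endoftext|>, <|sep|>, <|acc|>, <|tel|>, <|rrn|>

# <|acc|>, <|tel|> and <|rrn|> sit at ids 30000-30002 and are marked special,
# so they are never split during tokenization.
print(tokenizer.convert_tokens_to_ids(["<|acc|>", "<|tel|>", "<|rrn|>"]))
```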
checkpoint-6000/trainer_state.json
ADDED
@@ -0,0 +1,187 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 11.583011583011583,
+  "eval_steps": 1000,
+  "global_step": 6000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.58,
+      "learning_rate": 7.239382239382239e-07,
+      "loss": 2.3335,
+      "step": 300
+    },
+    {
+      "epoch": 1.16,
+      "learning_rate": 1.4478764478764478e-06,
+      "loss": 2.322,
+      "step": 600
+    },
+    {
+      "epoch": 1.74,
+      "learning_rate": 2.171814671814672e-06,
+      "loss": 2.268,
+      "step": 900
+    },
+    {
+      "epoch": 1.93,
+      "eval_loss": 2.2363617420196533,
+      "eval_runtime": 56.2328,
+      "eval_samples_per_second": 51.98,
+      "eval_steps_per_second": 3.254,
+      "step": 1000
+    },
+    {
+      "epoch": 2.32,
+      "learning_rate": 2.8957528957528956e-06,
+      "loss": 2.2427,
+      "step": 1200
+    },
+    {
+      "epoch": 2.9,
+      "learning_rate": 3.61969111969112e-06,
+      "loss": 2.2085,
+      "step": 1500
+    },
+    {
+      "epoch": 3.47,
+      "learning_rate": 4.343629343629344e-06,
+      "loss": 2.1902,
+      "step": 1800
+    },
+    {
+      "epoch": 3.86,
+      "eval_loss": 2.1604719161987305,
+      "eval_runtime": 56.4281,
+      "eval_samples_per_second": 51.8,
+      "eval_steps_per_second": 3.243,
+      "step": 2000
+    },
+    {
+      "epoch": 4.05,
+      "learning_rate": 5.067567567567568e-06,
+      "loss": 2.173,
+      "step": 2100
+    },
+    {
+      "epoch": 4.63,
+      "learning_rate": 5.791505791505791e-06,
+      "loss": 2.1517,
+      "step": 2400
+    },
+    {
+      "epoch": 5.21,
+      "learning_rate": 6.515444015444016e-06,
+      "loss": 2.135,
+      "step": 2700
+    },
+    {
+      "epoch": 5.79,
+      "learning_rate": 7.23938223938224e-06,
+      "loss": 2.1156,
+      "step": 3000
+    },
+    {
+      "epoch": 5.79,
+      "eval_loss": 2.108267068862915,
+      "eval_runtime": 56.4468,
+      "eval_samples_per_second": 51.783,
+      "eval_steps_per_second": 3.242,
+      "step": 3000
+    },
+    {
+      "epoch": 6.37,
+      "learning_rate": 7.963320463320465e-06,
+      "loss": 2.1038,
+      "step": 3300
+    },
+    {
+      "epoch": 6.95,
+      "learning_rate": 8.687258687258689e-06,
+      "loss": 2.1021,
+      "step": 3600
+    },
+    {
+      "epoch": 7.53,
+      "learning_rate": 9.41119691119691e-06,
+      "loss": 2.0882,
+      "step": 3900
+    },
+    {
+      "epoch": 7.72,
+      "eval_loss": 2.08085298538208,
+      "eval_runtime": 56.4755,
+      "eval_samples_per_second": 51.757,
+      "eval_steps_per_second": 3.24,
+      "step": 4000
+    },
+    {
+      "epoch": 8.11,
+      "learning_rate": 9.99549483102341e-06,
+      "loss": 2.0834,
+      "step": 4200
+    },
+    {
+      "epoch": 8.69,
+      "learning_rate": 9.819006680234513e-06,
+      "loss": 2.0744,
+      "step": 4500
+    },
+    {
+      "epoch": 9.27,
+      "learning_rate": 9.394326698914229e-06,
+      "loss": 2.0565,
+      "step": 4800
+    },
+    {
+      "epoch": 9.65,
+      "eval_loss": 2.062009811401367,
+      "eval_runtime": 56.2872,
+      "eval_samples_per_second": 51.93,
+      "eval_steps_per_second": 3.251,
+      "step": 5000
+    },
+    {
+      "epoch": 9.85,
+      "learning_rate": 8.743327050549326e-06,
+      "loss": 2.0622,
+      "step": 5100
+    },
+    {
+      "epoch": 10.42,
+      "learning_rate": 7.899535971376881e-06,
+      "loss": 2.0509,
+      "step": 5400
+    },
+    {
+      "epoch": 11.0,
+      "learning_rate": 6.906410975864522e-06,
+      "loss": 2.0494,
+      "step": 5700
+    },
+    {
+      "epoch": 11.58,
+      "learning_rate": 5.815100677603854e-06,
+      "loss": 2.0405,
+      "step": 6000
+    },
+    {
+      "epoch": 11.58,
+      "eval_loss": 2.0517830848693848,
+      "eval_runtime": 56.5476,
+      "eval_samples_per_second": 51.691,
+      "eval_steps_per_second": 3.236,
+      "step": 6000
+    }
+  ],
+  "logging_steps": 300,
+  "max_steps": 8288,
+  "num_train_epochs": 16,
+  "save_steps": 1000,
+  "total_flos": 5.200194349388022e+17,
+  "trial_name": null,
+  "trial_params": null
+}
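
trainer_state.json records where training stood when the checkpoint was written (global step 6000 of 8288, epoch ~11.58) along with the logged train and eval losses. Below is a hedged sketch of reading that history and of how the checkpoint would be resumed, assuming a `transformers` Trainer rebuilt exactly as in the original run.

```python
import json

# Sketch only: read the logged history out of trainer_state.json (local
# checkpoint-6000/ path assumed). To resume the run, a Trainer rebuilt with
# the same model, data and TrainingArguments would be called as
#   trainer.train(resume_from_checkpoint="checkpoint-6000")
# which restores the step/epoch counters from this file, plus the optimizer,
# LR schedule and RNG state from optimizer.pt, scheduler.pt and rng_state.pth.
with open("checkpoint-6000/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], "/", state["max_steps"])  # 6000 / 8288
eval_losses = [(e["step"], e["eval_loss"])
               for e in state["log_history"] if "eval_loss" in e]
print(eval_losses)  # eval loss falls from 2.236 at step 1000 to 2.052 at step 6000
```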
checkpoint-6000/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e3f853383e8dce9f00867dad3f3b4917bf63575060d38b299cdecb6532211b41
+size 4536