Word2Li committed on
Commit 0add802 · verified · 1 Parent(s): 8da93cd

Upload model

README.md ADDED
@@ -0,0 +1,164 @@
---
library_name: transformers
license: apache-2.0
base_model: mistralai/Mistral-7B-v0.3
language: en
datasets:
- Word2Li/MiddOptimized
tags:
- llama-factory
- full
pipeline_tag: text-generation
model-index:
- name: Mistral-7B-v0.3-Middo-WizardLM
  results:
  - task:
      type: text-generation
    dataset:
      name: MMLU
      type: MMLU
    metrics:
    - name: weighted accuracy
      type: weighted accuracy
      value: 43.26
      verified: true
  - task:
      type: text-generation
    dataset:
      name: IFEval
      type: IFEval
    metrics:
    - name: overall accuracy
      type: overall accuracy
      value: 49.80
      verified: true
  - task:
      type: text-generation
    dataset:
      name: GSM8K
      type: GSM8K
    metrics:
    - name: accuracy
      type: accuracy
      value: 41.09
      verified: true
  - task:
      type: text-generation
    dataset:
      name: MATH
      type: MATH
    metrics:
    - name: accuracy
      type: accuracy
      value: 10.02
      verified: true
  - task:
      type: text-generation
    dataset:
      name: HumanEval
      type: HumanEval
    metrics:
    - name: humaneval_pass@1
      type: humaneval_pass@1
      value: 41.46
      verified: true
  - task:
      type: text-generation
    dataset:
      name: MBPP
      type: MBPP
    metrics:
    - name: score
      type: score
      value: 34.60
      verified: true
  - task:
      type: text-generation
    dataset:
      name: Hellaswag
      type: Hellaswag
    metrics:
    - name: accuracy
      type: accuracy
      value: 66.02
      verified: true
  - task:
      type: text-generation
    dataset:
      name: GPQA
      type: GPQA
    metrics:
    - name: accuracy
      type: accuracy
      value: 22.22
      verified: true
metrics:
- accuracy
---

# Mistral-7B-v0.3-Middo-WizardLM

Paper: [Middo: Model-Informed Dynamic Data Optimization for Enhanced LLM Fine-Tuning via Closed-Loop Learning](https://arxiv.org/abs/2508.21589)

Code: https://github.com/Word2VecT/Middo

## Model description

This model is a fine-tuned version of [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3) on the [MiddOptimized/mistral_wizard](https://huggingface.co/datasets/Word2Li/MiddOptimized/viewer/default/mistral_wizard) dataset.
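
A minimal inference sketch with 🤗 Transformers. The repo id below is an assumption inferred from the uploader and model name; adjust it to wherever this checkpoint is actually hosted:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id (uploader + model name); not confirmed by this commit.
model_id = "Word2Li/Mistral-7B-v0.3-Middo-WizardLM"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="bfloat16", device_map="auto"
)

# The bundled chat template already appends "Assistant:" after the user turn,
# so no extra generation prompt is needed.
messages = [{"role": "user", "content": "Explain gradient accumulation in one sentence."}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```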

## Training and evaluation data

### Training data

Middo optimized [WizardLMTeam/WizardLM_evol_instruct_70k](https://huggingface.co/datasets/WizardLMTeam/WizardLM_evol_instruct_70k) on [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3).

### Evaluation data

- General
  - MMLU
  - IFEval
- Math
  - GSM8K
  - MATH
- Code
  - HumanEval
  - MBPP
- Reasoning
  - Hellaswag
  - GPQA

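For quick reference, the scores recorded in the model-index metadata above:

| Benchmark | Metric | Score |
|-----------|--------|-------|
| MMLU | weighted accuracy | 43.26 |
| IFEval | overall accuracy | 49.80 |
| GSM8K | accuracy | 41.09 |
| MATH | accuracy | 10.02 |
| HumanEval | pass@1 | 41.46 |
| MBPP | score | 34.60 |
| Hellaswag | accuracy | 66.02 |
| GPQA | accuracy | 22.22 |
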
## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:

- learning_rate: 1e-05
- train_batch_size: 4
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 8
- total_train_batch_size: 256
- total_eval_batch_size: 64
- optimizer: adamw_torch with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.03
- num_epochs: 1.0

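Training was run through LLaMA-Factory (per the `llama-factory` tag); as a sketch only, the list above maps onto `transformers.TrainingArguments` roughly as follows (the output path is hypothetical):

```python
from transformers import TrainingArguments

# Sketch of the hyperparameters above. With 8 GPUs, a per-device batch of 4
# and 8 gradient-accumulation steps give an effective batch of 4 * 8 * 8 = 256.
args = TrainingArguments(
    output_dir="mistral-7b-v0.3-middo-wizardlm",  # hypothetical
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=8,
    learning_rate=1e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.03,
    num_train_epochs=1.0,
    optim="adamw_torch",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    seed=42,
    bf16=True,  # matches "torch_dtype": "bfloat16" in config.json
)
```
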
### Training results

- epoch: 1.0
- total_flos: 4.871785990877872e+18
- train_loss: 0.6260631282554998
- train_runtime: 6928.3413
- train_samples_per_second: 12.871
- train_steps_per_second: 0.05

### Framework versions

- Transformers 4.55.0
- Pytorch 2.6.0+cu124
- Datasets 3.6.0
- Tokenizers 0.21.1
all_results.json ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 1.0,
  "total_flos": 4.871785990877872e+18,
  "train_loss": 0.6260631282554998,
  "train_runtime": 6928.3413,
  "train_samples_per_second": 12.871,
  "train_steps_per_second": 0.05
}
chat_template.jinja ADDED
@@ -0,0 +1,4 @@
{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}{% if system_message is defined %}{{ 'System: ' + system_message + '</s>' + '
' }}{% endif %}{% for message in loop_messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'Human: ' + content + '</s>' + '
Assistant:' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '
' }}{% endif %}{% endfor %}
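A small sketch of how this template renders; the expected strings follow directly from the template above (the repo id is the same hypothetical one used in the README example):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Word2Li/Mistral-7B-v0.3-Middo-WizardLM")

messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi!"},
]
print(tokenizer.apply_chat_template(messages, tokenize=False))
# System: You are helpful.</s>
# Human: Hi!</s>
# Assistant:
```
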
config.json ADDED
@@ -0,0 +1,26 @@
{
  "architectures": [
    "MistralForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": null,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.55.0",
  "use_cache": false,
  "vocab_size": 32768
}
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.55.0"
}
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:32bd83189edfae6c02e34773dd429747f5c7acc5f305bc0a7e53e06c91864727
size 4949453792
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51821c5b0119c1cd86f4624ecc1dc4ebda99d747ff78fa144ec52b14d8384f05
size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c847514a586c038163c39cb8f76e62111ce0f1b5873e592d388d147c76d1c919
size 4546807800
model.safetensors.index.json ADDED
@@ -0,0 +1,299 @@
{
  "metadata": {
    "total_parameters": 7248023552,
    "total_size": 14496047104
  },
  "weight_map": {
    "lm_head.weight": "model-00003-of-00003.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.norm.weight": "model-00003-of-00003.safetensors"
  }
}
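The metadata totals can be re-derived from the config.json values above; a short sketch of the arithmetic (the 2 bytes per parameter assume the bfloat16 `torch_dtype`):

```python
# Values from config.json in this commit.
hidden, inter, layers = 4096, 14336, 32
heads, kv_heads, vocab = 32, 8, 32768
head_dim = hidden // heads  # 128; config.json leaves head_dim null

attn = 2 * hidden * hidden + 2 * hidden * kv_heads * head_dim  # q/o + k/v (GQA)
mlp = 3 * hidden * inter                                       # gate/up/down
norms = 2 * hidden                                             # two RMSNorms per layer
embeddings = 2 * vocab * hidden  # embed_tokens + untied lm_head

total = layers * (attn + mlp + norms) + embeddings + hidden  # + final norm
print(total)      # 7248023552  -> matches "total_parameters"
print(2 * total)  # 14496047104 -> matches "total_size" in bytes (bf16)
```
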
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
size 587404
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
train_results.json ADDED
@@ -0,0 +1,8 @@
{
  "epoch": 1.0,
  "total_flos": 4.871785990877872e+18,
  "train_loss": 0.6260631282554998,
  "train_runtime": 6928.3413,
  "train_samples_per_second": 12.871,
  "train_steps_per_second": 0.05
}
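These figures are mutually consistent with the 349 optimizer steps in trainer_state.json and the effective batch size of 256 from the README; a rough check (the ~0.2% gap on samples is consistent with a smaller final batch):

```python
runtime = 6928.3413           # seconds, from train_results.json
steps, eff_batch = 349, 256   # from trainer_state.json and the README

print(steps / runtime)    # ~0.0504 steps/s, logged (rounded) as 0.05
print(12.871 * runtime)   # ~89175 samples processed
print(steps * eff_batch)  # 89344 if every batch were full
```
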
trainer_log.jsonl ADDED
@@ -0,0 +1,70 @@
{"current_steps": 5, "total_steps": 349, "loss": 0.7414, "lr": 3.6363636363636366e-06, "epoch": 0.014352350197344816, "percentage": 1.43, "elapsed_time": "0:01:46", "remaining_time": "2:02:12"}
{"current_steps": 10, "total_steps": 349, "loss": 0.7038, "lr": 8.181818181818183e-06, "epoch": 0.02870470039468963, "percentage": 2.87, "elapsed_time": "0:03:29", "remaining_time": "1:58:14"}
{"current_steps": 15, "total_steps": 349, "loss": 0.7009, "lr": 9.998056338091415e-06, "epoch": 0.04305705059203445, "percentage": 4.3, "elapsed_time": "0:05:06", "remaining_time": "1:53:40"}
{"current_steps": 20, "total_steps": 349, "loss": 0.6751, "lr": 9.986183876164412e-06, "epoch": 0.05740940078937926, "percentage": 5.73, "elapsed_time": "0:06:44", "remaining_time": "1:50:46"}
{"current_steps": 25, "total_steps": 349, "loss": 0.6547, "lr": 9.96354437049027e-06, "epoch": 0.07176175098672408, "percentage": 7.16, "elapsed_time": "0:08:23", "remaining_time": "1:48:39"}
{"current_steps": 30, "total_steps": 349, "loss": 0.6616, "lr": 9.930186708264902e-06, "epoch": 0.0861141011840689, "percentage": 8.6, "elapsed_time": "0:10:06", "remaining_time": "1:47:34"}
{"current_steps": 35, "total_steps": 349, "loss": 0.6828, "lr": 9.88618292120984e-06, "epoch": 0.1004664513814137, "percentage": 10.03, "elapsed_time": "0:11:42", "remaining_time": "1:45:05"}
{"current_steps": 40, "total_steps": 349, "loss": 0.6486, "lr": 9.831628030028698e-06, "epoch": 0.11481880157875853, "percentage": 11.46, "elapsed_time": "0:13:23", "remaining_time": "1:43:25"}
{"current_steps": 45, "total_steps": 349, "loss": 0.6592, "lr": 9.76663983922178e-06, "epoch": 0.12917115177610333, "percentage": 12.89, "elapsed_time": "0:15:02", "remaining_time": "1:41:33"}
{"current_steps": 50, "total_steps": 349, "loss": 0.6616, "lr": 9.691358682701927e-06, "epoch": 0.14352350197344815, "percentage": 14.33, "elapsed_time": "0:16:42", "remaining_time": "1:39:55"}
{"current_steps": 55, "total_steps": 349, "loss": 0.6479, "lr": 9.605947120760878e-06, "epoch": 0.15787585217079297, "percentage": 15.76, "elapsed_time": "0:18:21", "remaining_time": "1:38:10"}
{"current_steps": 60, "total_steps": 349, "loss": 0.6677, "lr": 9.510589589040554e-06, "epoch": 0.1722282023681378, "percentage": 17.19, "elapsed_time": "0:19:58", "remaining_time": "1:36:14"}
{"current_steps": 65, "total_steps": 349, "loss": 0.6507, "lr": 9.405492000267228e-06, "epoch": 0.1865805525654826, "percentage": 18.62, "elapsed_time": "0:21:39", "remaining_time": "1:34:37"}
{"current_steps": 70, "total_steps": 349, "loss": 0.6615, "lr": 9.29088129960862e-06, "epoch": 0.2009329027628274, "percentage": 20.06, "elapsed_time": "0:23:14", "remaining_time": "1:32:37"}
{"current_steps": 75, "total_steps": 349, "loss": 0.6522, "lr": 9.16700497461403e-06, "epoch": 0.21528525296017223, "percentage": 21.49, "elapsed_time": "0:24:52", "remaining_time": "1:30:53"}
{"current_steps": 80, "total_steps": 349, "loss": 0.6521, "lr": 9.034130520795774e-06, "epoch": 0.22963760315751705, "percentage": 22.92, "elapsed_time": "0:26:30", "remaining_time": "1:29:07"}
{"current_steps": 85, "total_steps": 349, "loss": 0.6496, "lr": 8.892544864005899e-06, "epoch": 0.24398995335486187, "percentage": 24.36, "elapsed_time": "0:28:07", "remaining_time": "1:27:20"}
{"current_steps": 90, "total_steps": 349, "loss": 0.6378, "lr": 8.742553740855507e-06, "epoch": 0.25834230355220666, "percentage": 25.79, "elapsed_time": "0:29:47", "remaining_time": "1:25:44"}
{"current_steps": 95, "total_steps": 349, "loss": 0.6422, "lr": 8.584481038514573e-06, "epoch": 0.2726946537495515, "percentage": 27.22, "elapsed_time": "0:31:25", "remaining_time": "1:24:00"}
{"current_steps": 100, "total_steps": 349, "loss": 0.6441, "lr": 8.418668095317912e-06, "epoch": 0.2870470039468963, "percentage": 28.65, "elapsed_time": "0:33:07", "remaining_time": "1:22:28"}
{"current_steps": 105, "total_steps": 349, "loss": 0.6333, "lr": 8.245472963687484e-06, "epoch": 0.3013993541442411, "percentage": 30.09, "elapsed_time": "0:34:45", "remaining_time": "1:20:45"}
{"current_steps": 110, "total_steps": 349, "loss": 0.6393, "lr": 8.065269636962765e-06, "epoch": 0.31575170434158595, "percentage": 31.52, "elapsed_time": "0:36:27", "remaining_time": "1:19:12"}
{"current_steps": 115, "total_steps": 349, "loss": 0.647, "lr": 7.878447241808634e-06, "epoch": 0.33010405453893077, "percentage": 32.95, "elapsed_time": "0:38:08", "remaining_time": "1:17:36"}
{"current_steps": 120, "total_steps": 349, "loss": 0.6226, "lr": 7.685409197944768e-06, "epoch": 0.3444564047362756, "percentage": 34.38, "elapsed_time": "0:39:47", "remaining_time": "1:15:55"}
{"current_steps": 125, "total_steps": 349, "loss": 0.6442, "lr": 7.486572347010937e-06, "epoch": 0.35880875493362036, "percentage": 35.82, "elapsed_time": "0:41:23", "remaining_time": "1:14:11"}
{"current_steps": 130, "total_steps": 349, "loss": 0.6434, "lr": 7.282366052449351e-06, "epoch": 0.3731611051309652, "percentage": 37.25, "elapsed_time": "0:43:01", "remaining_time": "1:12:28"}
{"current_steps": 135, "total_steps": 349, "loss": 0.628, "lr": 7.073231272347714e-06, "epoch": 0.38751345532831, "percentage": 38.68, "elapsed_time": "0:44:40", "remaining_time": "1:10:49"}
{"current_steps": 140, "total_steps": 349, "loss": 0.6349, "lr": 6.859619607245102e-06, "epoch": 0.4018658055256548, "percentage": 40.11, "elapsed_time": "0:46:20", "remaining_time": "1:09:10"}
{"current_steps": 145, "total_steps": 349, "loss": 0.6159, "lr": 6.641992324956776e-06, "epoch": 0.41621815572299964, "percentage": 41.55, "elapsed_time": "0:47:54", "remaining_time": "1:07:24"}
{"current_steps": 150, "total_steps": 349, "loss": 0.6397, "lr": 6.4208193645237314e-06, "epoch": 0.43057050592034446, "percentage": 42.98, "elapsed_time": "0:49:33", "remaining_time": "1:05:44"}
{"current_steps": 155, "total_steps": 349, "loss": 0.6268, "lr": 6.1965783214377895e-06, "epoch": 0.4449228561176893, "percentage": 44.41, "elapsed_time": "0:51:12", "remaining_time": "1:04:05"}
{"current_steps": 160, "total_steps": 349, "loss": 0.6302, "lr": 5.9697534163335645e-06, "epoch": 0.4592752063150341, "percentage": 45.85, "elapsed_time": "0:52:50", "remaining_time": "1:02:25"}
{"current_steps": 165, "total_steps": 349, "loss": 0.6163, "lr": 5.740834449374237e-06, "epoch": 0.4736275565123789, "percentage": 47.28, "elapsed_time": "0:54:35", "remaining_time": "1:00:52"}
{"current_steps": 170, "total_steps": 349, "loss": 0.6243, "lr": 5.510315742589042e-06, "epoch": 0.48797990670972374, "percentage": 48.71, "elapsed_time": "0:56:18", "remaining_time": "0:59:17"}
{"current_steps": 175, "total_steps": 349, "loss": 0.6245, "lr": 5.278695072446342e-06, "epoch": 0.5023322569070685, "percentage": 50.14, "elapsed_time": "0:57:57", "remaining_time": "0:57:37"}
{"current_steps": 180, "total_steps": 349, "loss": 0.607, "lr": 5.046472594967279e-06, "epoch": 0.5166846071044133, "percentage": 51.58, "elapsed_time": "0:59:33", "remaining_time": "0:55:55"}
{"current_steps": 185, "total_steps": 349, "loss": 0.609, "lr": 4.814149765701059e-06, "epoch": 0.5310369573017582, "percentage": 53.01, "elapsed_time": "1:01:14", "remaining_time": "0:54:17"}
{"current_steps": 190, "total_steps": 349, "loss": 0.6119, "lr": 4.582228256894093e-06, "epoch": 0.545389307499103, "percentage": 54.44, "elapsed_time": "1:02:56", "remaining_time": "0:52:39"}
{"current_steps": 195, "total_steps": 349, "loss": 0.6291, "lr": 4.351208874191192e-06, "epoch": 0.5597416576964478, "percentage": 55.87, "elapsed_time": "1:04:40", "remaining_time": "0:51:04"}
{"current_steps": 200, "total_steps": 349, "loss": 0.6255, "lr": 4.121590475208071e-06, "epoch": 0.5740940078937926, "percentage": 57.31, "elapsed_time": "1:06:17", "remaining_time": "0:49:23"}
{"current_steps": 205, "total_steps": 349, "loss": 0.6269, "lr": 3.8938688923104015e-06, "epoch": 0.5884463580911374, "percentage": 58.74, "elapsed_time": "1:07:55", "remaining_time": "0:47:43"}
{"current_steps": 210, "total_steps": 349, "loss": 0.6278, "lr": 3.668535861925509e-06, "epoch": 0.6027987082884823, "percentage": 60.17, "elapsed_time": "1:09:29", "remaining_time": "0:46:00"}
{"current_steps": 215, "total_steps": 349, "loss": 0.6, "lr": 3.4460779626987186e-06, "epoch": 0.6171510584858271, "percentage": 61.6, "elapsed_time": "1:11:03", "remaining_time": "0:44:17"}
{"current_steps": 220, "total_steps": 349, "loss": 0.5925, "lr": 3.226975564787322e-06, "epoch": 0.6315034086831719, "percentage": 63.04, "elapsed_time": "1:12:43", "remaining_time": "0:42:38"}
{"current_steps": 225, "total_steps": 349, "loss": 0.6035, "lr": 3.0117017925609802e-06, "epoch": 0.6458557588805167, "percentage": 64.47, "elapsed_time": "1:14:18", "remaining_time": "0:40:56"}
{"current_steps": 230, "total_steps": 349, "loss": 0.6022, "lr": 2.800721502948506e-06, "epoch": 0.6602081090778615, "percentage": 65.9, "elapsed_time": "1:15:58", "remaining_time": "0:39:18"}
{"current_steps": 235, "total_steps": 349, "loss": 0.5966, "lr": 2.5944902816371573e-06, "epoch": 0.6745604592752064, "percentage": 67.34, "elapsed_time": "1:17:39", "remaining_time": "0:37:40"}
{"current_steps": 240, "total_steps": 349, "loss": 0.5839, "lr": 2.3934534592920416e-06, "epoch": 0.6889128094725512, "percentage": 68.77, "elapsed_time": "1:19:16", "remaining_time": "0:36:00"}
{"current_steps": 245, "total_steps": 349, "loss": 0.6063, "lr": 2.1980451499199262e-06, "epoch": 0.703265159669896, "percentage": 70.2, "elapsed_time": "1:20:54", "remaining_time": "0:34:20"}
{"current_steps": 250, "total_steps": 349, "loss": 0.6068, "lr": 2.0086873134540626e-06, "epoch": 0.7176175098672407, "percentage": 71.63, "elapsed_time": "1:22:28", "remaining_time": "0:32:39"}
{"current_steps": 255, "total_steps": 349, "loss": 0.5948, "lr": 1.8257888445842026e-06, "epoch": 0.7319698600645855, "percentage": 73.07, "elapsed_time": "1:24:10", "remaining_time": "0:31:01"}
{"current_steps": 260, "total_steps": 349, "loss": 0.5932, "lr": 1.6497446897993885e-06, "epoch": 0.7463222102619304, "percentage": 74.5, "elapsed_time": "1:25:50", "remaining_time": "0:29:23"}
{"current_steps": 265, "total_steps": 349, "loss": 0.5998, "lr": 1.4809349945501422e-06, "epoch": 0.7606745604592752, "percentage": 75.93, "elapsed_time": "1:27:27", "remaining_time": "0:27:43"}
{"current_steps": 270, "total_steps": 349, "loss": 0.6135, "lr": 1.319724282371664e-06, "epoch": 0.77502691065662, "percentage": 77.36, "elapsed_time": "1:29:06", "remaining_time": "0:26:04"}
{"current_steps": 275, "total_steps": 349, "loss": 0.6069, "lr": 1.1664606677406025e-06, "epoch": 0.7893792608539648, "percentage": 78.8, "elapsed_time": "1:30:48", "remaining_time": "0:24:26"}
{"current_steps": 280, "total_steps": 349, "loss": 0.5974, "lr": 1.0214751043651582e-06, "epoch": 0.8037316110513096, "percentage": 80.23, "elapsed_time": "1:32:25", "remaining_time": "0:22:46"}
{"current_steps": 285, "total_steps": 349, "loss": 0.5973, "lr": 8.850806705317183e-07, "epoch": 0.8180839612486545, "percentage": 81.66, "elapsed_time": "1:33:59", "remaining_time": "0:21:06"}
{"current_steps": 290, "total_steps": 349, "loss": 0.5867, "lr": 7.575718930512516e-07, "epoch": 0.8324363114459993, "percentage": 83.09, "elapsed_time": "1:35:36", "remaining_time": "0:19:27"}
{"current_steps": 295, "total_steps": 349, "loss": 0.61, "lr": 6.392241112653031e-07, "epoch": 0.8467886616433441, "percentage": 84.53, "elapsed_time": "1:37:13", "remaining_time": "0:17:47"}
{"current_steps": 300, "total_steps": 349, "loss": 0.6084, "lr": 5.302928824849335e-07, "epoch": 0.8611410118406889, "percentage": 85.96, "elapsed_time": "1:38:48", "remaining_time": "0:16:08"}
{"current_steps": 305, "total_steps": 349, "loss": 0.599, "lr": 4.3101343014651356e-07, "epoch": 0.8754933620380337, "percentage": 87.39, "elapsed_time": "1:40:28", "remaining_time": "0:14:29"}
{"current_steps": 310, "total_steps": 349, "loss": 0.5911, "lr": 3.416001358759635e-07, "epoch": 0.8898457122353786, "percentage": 88.83, "elapsed_time": "1:42:05", "remaining_time": "0:12:50"}
{"current_steps": 315, "total_steps": 349, "loss": 0.5886, "lr": 2.6224607655831236e-07, "epoch": 0.9041980624327234, "percentage": 90.26, "elapsed_time": "1:43:45", "remaining_time": "0:11:11"}
{"current_steps": 320, "total_steps": 349, "loss": 0.5917, "lr": 1.9312260741218114e-07, "epoch": 0.9185504126300682, "percentage": 91.69, "elapsed_time": "1:45:26", "remaining_time": "0:09:33"}
{"current_steps": 325, "total_steps": 349, "loss": 0.5799, "lr": 1.3437899196950765e-07, "epoch": 0.932902762827413, "percentage": 93.12, "elapsed_time": "1:47:14", "remaining_time": "0:07:55"}
{"current_steps": 330, "total_steps": 349, "loss": 0.6015, "lr": 8.614207975952083e-08, "epoch": 0.9472551130247578, "percentage": 94.56, "elapsed_time": "1:48:53", "remaining_time": "0:06:16"}
{"current_steps": 335, "total_steps": 349, "loss": 0.5848, "lr": 4.851603239296065e-08, "epoch": 0.9616074632221027, "percentage": 95.99, "elapsed_time": "1:50:31", "remaining_time": "0:04:37"}
{"current_steps": 340, "total_steps": 349, "loss": 0.5879, "lr": 2.158209863804217e-08, "epoch": 0.9759598134194475, "percentage": 97.42, "elapsed_time": "1:52:08", "remaining_time": "0:02:58"}
{"current_steps": 345, "total_steps": 349, "loss": 0.5836, "lr": 5.398438973845954e-09, "epoch": 0.9903121636167922, "percentage": 98.85, "elapsed_time": "1:53:47", "remaining_time": "0:01:19"}
{"current_steps": 349, "total_steps": 349, "epoch": 1.0, "percentage": 100.0, "elapsed_time": "1:55:28", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,517 @@
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 349,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014352350197344816,
      "grad_norm": 11.071812629699707,
      "learning_rate": 3.6363636363636366e-06,
      "loss": 0.7414,
      "step": 5
    },
    {
      "epoch": 0.02870470039468963,
      "grad_norm": 7.1317901611328125,
      "learning_rate": 8.181818181818183e-06,
      "loss": 0.7038,
      "step": 10
    },
    {
      "epoch": 0.04305705059203445,
      "grad_norm": 4.095168113708496,
      "learning_rate": 9.998056338091415e-06,
      "loss": 0.7009,
      "step": 15
    },
    {
      "epoch": 0.05740940078937926,
      "grad_norm": 2.938896894454956,
      "learning_rate": 9.986183876164412e-06,
      "loss": 0.6751,
      "step": 20
    },
    {
      "epoch": 0.07176175098672408,
      "grad_norm": 3.5328586101531982,
      "learning_rate": 9.96354437049027e-06,
      "loss": 0.6547,
      "step": 25
    },
    {
      "epoch": 0.0861141011840689,
      "grad_norm": 2.831934690475464,
      "learning_rate": 9.930186708264902e-06,
      "loss": 0.6616,
      "step": 30
    },
    {
      "epoch": 0.1004664513814137,
      "grad_norm": 2.707149028778076,
      "learning_rate": 9.88618292120984e-06,
      "loss": 0.6828,
      "step": 35
    },
    {
      "epoch": 0.11481880157875853,
      "grad_norm": 2.4383013248443604,
      "learning_rate": 9.831628030028698e-06,
      "loss": 0.6486,
      "step": 40
    },
    {
      "epoch": 0.12917115177610333,
      "grad_norm": 2.7486937046051025,
      "learning_rate": 9.76663983922178e-06,
      "loss": 0.6592,
      "step": 45
    },
    {
      "epoch": 0.14352350197344815,
      "grad_norm": 2.4958226680755615,
      "learning_rate": 9.691358682701927e-06,
      "loss": 0.6616,
      "step": 50
    },
    {
      "epoch": 0.15787585217079297,
      "grad_norm": 2.9137589931488037,
      "learning_rate": 9.605947120760878e-06,
      "loss": 0.6479,
      "step": 55
    },
    {
      "epoch": 0.1722282023681378,
      "grad_norm": 2.6844582557678223,
      "learning_rate": 9.510589589040554e-06,
      "loss": 0.6677,
      "step": 60
    },
    {
      "epoch": 0.1865805525654826,
      "grad_norm": 2.127028703689575,
      "learning_rate": 9.405492000267228e-06,
      "loss": 0.6507,
      "step": 65
    },
    {
      "epoch": 0.2009329027628274,
      "grad_norm": 2.487703800201416,
      "learning_rate": 9.29088129960862e-06,
      "loss": 0.6615,
      "step": 70
    },
    {
      "epoch": 0.21528525296017223,
      "grad_norm": 2.5370140075683594,
      "learning_rate": 9.16700497461403e-06,
      "loss": 0.6522,
      "step": 75
    },
    {
      "epoch": 0.22963760315751705,
      "grad_norm": 2.203188896179199,
      "learning_rate": 9.034130520795774e-06,
      "loss": 0.6521,
      "step": 80
    },
    {
      "epoch": 0.24398995335486187,
      "grad_norm": 2.402724504470825,
      "learning_rate": 8.892544864005899e-06,
      "loss": 0.6496,
      "step": 85
    },
    {
      "epoch": 0.25834230355220666,
      "grad_norm": 2.3624093532562256,
      "learning_rate": 8.742553740855507e-06,
      "loss": 0.6378,
      "step": 90
    },
    {
      "epoch": 0.2726946537495515,
      "grad_norm": 2.36360239982605,
      "learning_rate": 8.584481038514573e-06,
      "loss": 0.6422,
      "step": 95
    },
    {
      "epoch": 0.2870470039468963,
      "grad_norm": 2.0742623805999756,
      "learning_rate": 8.418668095317912e-06,
      "loss": 0.6441,
      "step": 100
    },
    {
      "epoch": 0.3013993541442411,
      "grad_norm": 2.300394058227539,
      "learning_rate": 8.245472963687484e-06,
      "loss": 0.6333,
      "step": 105
    },
    {
      "epoch": 0.31575170434158595,
      "grad_norm": 2.1633546352386475,
      "learning_rate": 8.065269636962765e-06,
      "loss": 0.6393,
      "step": 110
    },
    {
      "epoch": 0.33010405453893077,
      "grad_norm": 2.291600227355957,
      "learning_rate": 7.878447241808634e-06,
      "loss": 0.647,
      "step": 115
    },
    {
      "epoch": 0.3444564047362756,
      "grad_norm": 2.0962560176849365,
      "learning_rate": 7.685409197944768e-06,
      "loss": 0.6226,
      "step": 120
    },
    {
      "epoch": 0.35880875493362036,
      "grad_norm": 2.4263839721679688,
      "learning_rate": 7.486572347010937e-06,
      "loss": 0.6442,
      "step": 125
    },
    {
      "epoch": 0.3731611051309652,
      "grad_norm": 2.1769137382507324,
      "learning_rate": 7.282366052449351e-06,
      "loss": 0.6434,
      "step": 130
    },
    {
      "epoch": 0.38751345532831,
      "grad_norm": 2.181265115737915,
      "learning_rate": 7.073231272347714e-06,
      "loss": 0.628,
      "step": 135
    },
    {
      "epoch": 0.4018658055256548,
      "grad_norm": 2.1604084968566895,
      "learning_rate": 6.859619607245102e-06,
      "loss": 0.6349,
      "step": 140
    },
    {
      "epoch": 0.41621815572299964,
      "grad_norm": 2.0021893978118896,
      "learning_rate": 6.641992324956776e-06,
      "loss": 0.6159,
      "step": 145
    },
    {
      "epoch": 0.43057050592034446,
      "grad_norm": 2.3016304969787598,
      "learning_rate": 6.4208193645237314e-06,
      "loss": 0.6397,
      "step": 150
    },
    {
      "epoch": 0.4449228561176893,
      "grad_norm": 2.1002519130706787,
      "learning_rate": 6.1965783214377895e-06,
      "loss": 0.6268,
      "step": 155
    },
    {
      "epoch": 0.4592752063150341,
      "grad_norm": 4.018091201782227,
      "learning_rate": 5.9697534163335645e-06,
      "loss": 0.6302,
      "step": 160
    },
    {
      "epoch": 0.4736275565123789,
      "grad_norm": 1.9025262594223022,
      "learning_rate": 5.740834449374237e-06,
      "loss": 0.6163,
      "step": 165
    },
    {
      "epoch": 0.48797990670972374,
      "grad_norm": 1.9070066213607788,
      "learning_rate": 5.510315742589042e-06,
      "loss": 0.6243,
      "step": 170
    },
    {
      "epoch": 0.5023322569070685,
      "grad_norm": 2.002209186553955,
      "learning_rate": 5.278695072446342e-06,
      "loss": 0.6245,
      "step": 175
    },
    {
      "epoch": 0.5166846071044133,
      "grad_norm": 1.9884896278381348,
      "learning_rate": 5.046472594967279e-06,
      "loss": 0.607,
      "step": 180
    },
    {
      "epoch": 0.5310369573017582,
      "grad_norm": 2.062976837158203,
      "learning_rate": 4.814149765701059e-06,
      "loss": 0.609,
      "step": 185
    },
    {
      "epoch": 0.545389307499103,
      "grad_norm": 1.9803415536880493,
      "learning_rate": 4.582228256894093e-06,
      "loss": 0.6119,
      "step": 190
    },
    {
      "epoch": 0.5597416576964478,
      "grad_norm": 1.8966432809829712,
      "learning_rate": 4.351208874191192e-06,
      "loss": 0.6291,
      "step": 195
    },
    {
      "epoch": 0.5740940078937926,
      "grad_norm": 2.038236618041992,
      "learning_rate": 4.121590475208071e-06,
      "loss": 0.6255,
      "step": 200
    },
    {
      "epoch": 0.5884463580911374,
      "grad_norm": 2.072604179382324,
      "learning_rate": 3.8938688923104015e-06,
      "loss": 0.6269,
      "step": 205
    },
    {
      "epoch": 0.6027987082884823,
      "grad_norm": 2.125389814376831,
      "learning_rate": 3.668535861925509e-06,
      "loss": 0.6278,
      "step": 210
    },
    {
      "epoch": 0.6171510584858271,
      "grad_norm": 2.0525448322296143,
      "learning_rate": 3.4460779626987186e-06,
      "loss": 0.6,
      "step": 215
    },
    {
      "epoch": 0.6315034086831719,
      "grad_norm": 1.934063196182251,
      "learning_rate": 3.226975564787322e-06,
      "loss": 0.5925,
      "step": 220
    },
    {
      "epoch": 0.6458557588805167,
      "grad_norm": 1.8446972370147705,
      "learning_rate": 3.0117017925609802e-06,
      "loss": 0.6035,
      "step": 225
    },
    {
      "epoch": 0.6602081090778615,
      "grad_norm": 2.0047950744628906,
      "learning_rate": 2.800721502948506e-06,
      "loss": 0.6022,
      "step": 230
    },
    {
      "epoch": 0.6745604592752064,
      "grad_norm": 1.9434928894042969,
      "learning_rate": 2.5944902816371573e-06,
      "loss": 0.5966,
      "step": 235
    },
    {
      "epoch": 0.6889128094725512,
      "grad_norm": 1.9444347620010376,
      "learning_rate": 2.3934534592920416e-06,
      "loss": 0.5839,
      "step": 240
    },
    {
      "epoch": 0.703265159669896,
      "grad_norm": 1.9846431016921997,
      "learning_rate": 2.1980451499199262e-06,
      "loss": 0.6063,
      "step": 245
    },
    {
      "epoch": 0.7176175098672407,
      "grad_norm": 1.8537969589233398,
      "learning_rate": 2.0086873134540626e-06,
      "loss": 0.6068,
      "step": 250
    },
    {
      "epoch": 0.7319698600645855,
      "grad_norm": 1.9193341732025146,
      "learning_rate": 1.8257888445842026e-06,
      "loss": 0.5948,
      "step": 255
    },
    {
      "epoch": 0.7463222102619304,
      "grad_norm": 1.9606044292449951,
      "learning_rate": 1.6497446897993885e-06,
      "loss": 0.5932,
      "step": 260
    },
    {
      "epoch": 0.7606745604592752,
      "grad_norm": 1.9930768013000488,
      "learning_rate": 1.4809349945501422e-06,
      "loss": 0.5998,
      "step": 265
    },
    {
      "epoch": 0.77502691065662,
      "grad_norm": 1.9653593301773071,
      "learning_rate": 1.319724282371664e-06,
      "loss": 0.6135,
      "step": 270
    },
    {
      "epoch": 0.7893792608539648,
      "grad_norm": 1.7707090377807617,
      "learning_rate": 1.1664606677406025e-06,
      "loss": 0.6069,
      "step": 275
    },
    {
      "epoch": 0.8037316110513096,
      "grad_norm": 1.9296361207962036,
      "learning_rate": 1.0214751043651582e-06,
      "loss": 0.5974,
      "step": 280
    },
    {
      "epoch": 0.8180839612486545,
      "grad_norm": 1.89194917678833,
      "learning_rate": 8.850806705317183e-07,
      "loss": 0.5973,
      "step": 285
    },
    {
      "epoch": 0.8324363114459993,
      "grad_norm": 1.991350769996643,
      "learning_rate": 7.575718930512516e-07,
      "loss": 0.5867,
      "step": 290
    },
    {
      "epoch": 0.8467886616433441,
      "grad_norm": 1.789543867111206,
      "learning_rate": 6.392241112653031e-07,
      "loss": 0.61,
      "step": 295
    },
    {
      "epoch": 0.8611410118406889,
      "grad_norm": 1.8926385641098022,
      "learning_rate": 5.302928824849335e-07,
      "loss": 0.6084,
      "step": 300
    },
    {
      "epoch": 0.8754933620380337,
      "grad_norm": 1.8285890817642212,
      "learning_rate": 4.3101343014651356e-07,
      "loss": 0.599,
      "step": 305
    },
    {
      "epoch": 0.8898457122353786,
      "grad_norm": 1.9200646877288818,
      "learning_rate": 3.416001358759635e-07,
      "loss": 0.5911,
      "step": 310
    },
    {
      "epoch": 0.9041980624327234,
      "grad_norm": 1.8193855285644531,
      "learning_rate": 2.6224607655831236e-07,
      "loss": 0.5886,
      "step": 315
    },
    {
      "epoch": 0.9185504126300682,
      "grad_norm": 1.7819342613220215,
      "learning_rate": 1.9312260741218114e-07,
      "loss": 0.5917,
      "step": 320
    },
    {
      "epoch": 0.932902762827413,
      "grad_norm": 1.8590822219848633,
      "learning_rate": 1.3437899196950765e-07,
      "loss": 0.5799,
      "step": 325
    },
    {
      "epoch": 0.9472551130247578,
      "grad_norm": 1.7508701086044312,
      "learning_rate": 8.614207975952083e-08,
      "loss": 0.6015,
      "step": 330
    },
    {
      "epoch": 0.9616074632221027,
      "grad_norm": 1.8058841228485107,
      "learning_rate": 4.851603239296065e-08,
      "loss": 0.5848,
      "step": 335
    },
    {
      "epoch": 0.9759598134194475,
      "grad_norm": 1.788930892944336,
      "learning_rate": 2.158209863804217e-08,
      "loss": 0.5879,
      "step": 340
    },
    {
      "epoch": 0.9903121636167922,
      "grad_norm": 1.9363088607788086,
      "learning_rate": 5.398438973845954e-09,
      "loss": 0.5836,
      "step": 345
    }
  ],
  "logging_steps": 5,
  "max_steps": 349,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.871785990877872e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
training_loss.png ADDED