CMLL committed
Commit 4a9e478
1 Parent(s): c99dab2

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50):
  1. README.md +58 -0
  2. adapter_config.json +27 -0
  3. adapter_model.safetensors +3 -0
  4. added_tokens.json +5 -0
  5. all_results.json +7 -0
  6. checkpoint-100/README.md +204 -0
  7. checkpoint-100/adapter_config.json +27 -0
  8. checkpoint-100/adapter_model.safetensors +3 -0
  9. checkpoint-100/added_tokens.json +5 -0
  10. checkpoint-100/merges.txt +0 -0
  11. checkpoint-100/optimizer.pt +3 -0
  12. checkpoint-100/rng_state.pth +3 -0
  13. checkpoint-100/scheduler.pt +3 -0
  14. checkpoint-100/special_tokens_map.json +20 -0
  15. checkpoint-100/tokenizer_config.json +44 -0
  16. checkpoint-100/trainer_state.json +141 -0
  17. checkpoint-100/training_args.bin +3 -0
  18. checkpoint-100/vocab.json +0 -0
  19. checkpoint-1000/README.md +204 -0
  20. checkpoint-1000/adapter_config.json +27 -0
  21. checkpoint-1000/adapter_model.safetensors +3 -0
  22. checkpoint-1000/added_tokens.json +5 -0
  23. checkpoint-1000/merges.txt +0 -0
  24. checkpoint-1000/optimizer.pt +3 -0
  25. checkpoint-1000/rng_state.pth +3 -0
  26. checkpoint-1000/scheduler.pt +3 -0
  27. checkpoint-1000/special_tokens_map.json +20 -0
  28. checkpoint-1000/tokenizer_config.json +44 -0
  29. checkpoint-1000/trainer_state.json +1221 -0
  30. checkpoint-1000/training_args.bin +3 -0
  31. checkpoint-1000/vocab.json +0 -0
  32. checkpoint-1100/README.md +204 -0
  33. checkpoint-1100/adapter_config.json +27 -0
  34. checkpoint-1100/adapter_model.safetensors +3 -0
  35. checkpoint-1100/added_tokens.json +5 -0
  36. checkpoint-1100/merges.txt +0 -0
  37. checkpoint-1100/optimizer.pt +3 -0
  38. checkpoint-1100/rng_state.pth +3 -0
  39. checkpoint-1100/scheduler.pt +3 -0
  40. checkpoint-1100/special_tokens_map.json +20 -0
  41. checkpoint-1100/tokenizer_config.json +44 -0
  42. checkpoint-1100/trainer_state.json +1341 -0
  43. checkpoint-1100/training_args.bin +3 -0
  44. checkpoint-1100/vocab.json +0 -0
  45. checkpoint-1200/README.md +204 -0
  46. checkpoint-1200/adapter_config.json +27 -0
  47. checkpoint-1200/adapter_model.safetensors +3 -0
  48. checkpoint-1200/added_tokens.json +5 -0
  49. checkpoint-1200/merges.txt +0 -0
  50. checkpoint-1200/optimizer.pt +3 -0
README.md ADDED
@@ -0,0 +1,58 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: Qwen/Qwen1.5-0.5B-Chat
+ model-index:
+ - name: train_2024-02-22-01-50-49
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2024-02-22-01-50-49
+
+ This model is a fine-tuned version of [Qwen/Qwen1.5-0.5B-Chat](https://huggingface.co/Qwen/Qwen1.5-0.5B-Chat) on the TCM dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 3.0
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.8.2
+ - Transformers 4.37.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.17.1
+ - Tokenizers 0.15.2
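The files in this commit are a LoRA adapter rather than full model weights, so they have to be loaded on top of the base checkpoint. A minimal sketch with `transformers` and `peft`, assuming the adapter has been downloaded locally or is referenced by its Hub repo id (the adapter path below is a placeholder, not a real id):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "Qwen/Qwen1.5-0.5B-Chat"
adapter_id = "path/or/repo-id/of-this-adapter"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)

# Attach the LoRA weights stored in adapter_model.safetensors / adapter_config.json.
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()
```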
adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
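For reference, the configuration above corresponds roughly to the following `peft.LoraConfig`. This is a sketch only: LLaMA-Factory constructs the config internally, and unlisted field defaults may differ between PEFT versions.

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # "r"
    lora_alpha=16,                        # "lora_alpha"
    lora_dropout=0.1,                     # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # attention query/value projections only
    bias="none",
    task_type="CAUSAL_LM",
)
```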
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a72061ac045d65f070c70835e3ef8fbeee8b539105c37d935cb148d8a1c243c
+ size 3158328
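The three lines above are a Git LFS pointer, not the weights themselves: the ~3 MB safetensors file is stored in LFS and identified by its SHA-256. A small sketch for checking a downloaded copy against the pointer, assuming the file sits in the current directory:

```python
import hashlib

expected_oid = "8a72061ac045d65f070c70835e3ef8fbeee8b539105c37d935cb148d8a1c243c"

# Hash the downloaded file and compare it with the oid recorded in the pointer.
with open("adapter_model.safetensors", "rb") as f:
    actual = hashlib.sha256(f.read()).hexdigest()

print(actual == expected_oid)
```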
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
all_results.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 2.635484080746734,
+   "train_runtime": 8118.4972,
+   "train_samples_per_second": 14.744,
+   "train_steps_per_second": 0.922
+ }
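These run-level numbers are consistent with the hyperparameters in the README (effective batch size 16, 3 epochs) and the 7482 optimizer steps recorded in the trainer state. A rough back-of-the-envelope check, using only the reported figures:

```python
train_runtime = 8118.4972        # seconds, from all_results.json
samples_per_second = 14.744      # from all_results.json
num_epochs = 3.0
effective_batch = 16             # 4 per device * 4 gradient accumulation steps

samples_seen = train_runtime * samples_per_second   # ~119,700 examples processed
dataset_size = samples_seen / num_epochs            # ~39,900 examples per epoch
optimizer_steps = samples_seen / effective_batch    # ~7,480 steps, close to max_steps = 7482

print(round(dataset_size), round(optimizer_steps))
```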
checkpoint-100/README.md ADDED
@@ -0,0 +1,204 @@
1
+ ---
2
+ library_name: peft
3
+ base_model: Qwen/Qwen1.5-0.5B-Chat
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+
201
+
202
+ ### Framework versions
203
+
204
+ - PEFT 0.8.2
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47cb2f49992f5a7aaef8bdc14b1dfc4ab17a01fc867360b66a8f38be93fcc27f
+ size 3158328
checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0254f7f39d0c7383561d56725c864a1d53a3a3c738e786cd4b9acdc8dc921347
+ size 6372346
checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27c78ee3df04a03a4e72e3762f8bd320e7176679cef4aa2cdc0a678cdffe4529
+ size 14244
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af875236f293f3366cb3d9c3c7f77550b6d835517e2e3242699c4c71898cef49
+ size 1064
checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
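The `chat_template` above is the standard Qwen ChatML format: it injects a default system prompt when none is supplied and appends `<|im_start|>assistant\n` when a generation prompt is requested. A small sketch of how it is applied (the user message is purely illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")

messages = [{"role": "user", "content": "What does this TCM formula treat?"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n'
# '<|im_start|>user\nWhat does this TCM formula treat?<|im_end|>\n'
# '<|im_start|>assistant\n'
print(prompt)
```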
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,141 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.040096230954290296,
5
+ "eval_steps": 500,
6
+ "global_step": 100,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.999997796189788e-05,
14
+ "loss": 2.7465,
15
+ "step": 5
16
+ },
17
+ {
18
+ "epoch": 0.0,
19
+ "learning_rate": 1.9999911847688657e-05,
20
+ "loss": 2.6877,
21
+ "step": 10
22
+ },
23
+ {
24
+ "epoch": 0.01,
25
+ "learning_rate": 1.999980165766374e-05,
26
+ "loss": 2.6848,
27
+ "step": 15
28
+ },
29
+ {
30
+ "epoch": 0.01,
31
+ "learning_rate": 1.9999647392308798e-05,
32
+ "loss": 2.7677,
33
+ "step": 20
34
+ },
35
+ {
36
+ "epoch": 0.01,
37
+ "learning_rate": 1.9999449052303777e-05,
38
+ "loss": 2.7784,
39
+ "step": 25
40
+ },
41
+ {
42
+ "epoch": 0.01,
43
+ "learning_rate": 1.9999206638522888e-05,
44
+ "loss": 2.8161,
45
+ "step": 30
46
+ },
47
+ {
48
+ "epoch": 0.01,
49
+ "learning_rate": 1.9998920152034595e-05,
50
+ "loss": 2.6752,
51
+ "step": 35
52
+ },
53
+ {
54
+ "epoch": 0.02,
55
+ "learning_rate": 1.9998589594101623e-05,
56
+ "loss": 2.6403,
57
+ "step": 40
58
+ },
59
+ {
60
+ "epoch": 0.02,
61
+ "learning_rate": 1.9998214966180948e-05,
62
+ "loss": 2.6433,
63
+ "step": 45
64
+ },
65
+ {
66
+ "epoch": 0.02,
67
+ "learning_rate": 1.999779626992378e-05,
68
+ "loss": 2.5787,
69
+ "step": 50
70
+ },
71
+ {
72
+ "epoch": 0.02,
73
+ "learning_rate": 1.9997333507175583e-05,
74
+ "loss": 2.6812,
75
+ "step": 55
76
+ },
77
+ {
78
+ "epoch": 0.02,
79
+ "learning_rate": 1.9996826679976033e-05,
80
+ "loss": 2.7287,
81
+ "step": 60
82
+ },
83
+ {
84
+ "epoch": 0.03,
85
+ "learning_rate": 1.9996275790559037e-05,
86
+ "loss": 2.6465,
87
+ "step": 65
88
+ },
89
+ {
90
+ "epoch": 0.03,
91
+ "learning_rate": 1.99956808413527e-05,
92
+ "loss": 2.5268,
93
+ "step": 70
94
+ },
95
+ {
96
+ "epoch": 0.03,
97
+ "learning_rate": 1.999504183497934e-05,
98
+ "loss": 2.6893,
99
+ "step": 75
100
+ },
101
+ {
102
+ "epoch": 0.03,
103
+ "learning_rate": 1.9994358774255444e-05,
104
+ "loss": 2.6274,
105
+ "step": 80
106
+ },
107
+ {
108
+ "epoch": 0.03,
109
+ "learning_rate": 1.9993631662191696e-05,
110
+ "loss": 2.6232,
111
+ "step": 85
112
+ },
113
+ {
114
+ "epoch": 0.04,
115
+ "learning_rate": 1.9992860501992924e-05,
116
+ "loss": 2.7188,
117
+ "step": 90
118
+ },
119
+ {
120
+ "epoch": 0.04,
121
+ "learning_rate": 1.9992045297058108e-05,
122
+ "loss": 2.5388,
123
+ "step": 95
124
+ },
125
+ {
126
+ "epoch": 0.04,
127
+ "learning_rate": 1.9991186050980366e-05,
128
+ "loss": 2.6793,
129
+ "step": 100
130
+ }
131
+ ],
132
+ "logging_steps": 5,
133
+ "max_steps": 7482,
134
+ "num_input_tokens_seen": 0,
135
+ "num_train_epochs": 3,
136
+ "save_steps": 100,
137
+ "total_flos": 1277352456683520.0,
138
+ "train_batch_size": 4,
139
+ "trial_name": null,
140
+ "trial_params": null
141
+ }
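trainer_state.json records the loss and learning rate every 5 steps (`logging_steps: 5`), so the training curve can be recovered directly from `log_history`. A minimal sketch, assuming the checkpoint folder has been downloaded locally:

```python
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Keep only the logged training entries that carry a loss value.
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(points[0], points[-1])   # (5, 2.7465) ... (100, 2.6793)
```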
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c481a87016b183d9406558b8812ee05f00d60a3c721055833e3cdda34cf9bb26
+ size 4920
checkpoint-100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/README.md ADDED
@@ -0,0 +1,204 @@
1
+ ---
2
+ library_name: peft
3
+ base_model: Qwen/Qwen1.5-0.5B-Chat
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+
201
+
202
+ ### Framework versions
203
+
204
+ - PEFT 0.8.2
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_rslora": false
+ }
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daa3ace4db8788cb931ef75e75be426e406a0c3385c5ed0daee45e45870e81fc
+ size 3158328
checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e77b0f72e8de4935175f876165c3e5b30af28872e5cf0f93f3175b50a7142f51
+ size 6372346
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2c0360ce941ee730c6f2a46af569dcd5ffb08bf45fafc080b3769edd586dde9
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbb15b708822e2f21bbe2b8b32e42ae6f47a448e81da506799b4867a45b68e6a
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,1221 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.40096230954290296,
5
+ "eval_steps": 500,
6
+ "global_step": 1000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.999997796189788e-05,
14
+ "loss": 2.7465,
15
+ "step": 5
16
+ },
17
+ {
18
+ "epoch": 0.0,
19
+ "learning_rate": 1.9999911847688657e-05,
20
+ "loss": 2.6877,
21
+ "step": 10
22
+ },
23
+ {
24
+ "epoch": 0.01,
25
+ "learning_rate": 1.999980165766374e-05,
26
+ "loss": 2.6848,
27
+ "step": 15
28
+ },
29
+ {
30
+ "epoch": 0.01,
31
+ "learning_rate": 1.9999647392308798e-05,
32
+ "loss": 2.7677,
33
+ "step": 20
34
+ },
35
+ {
36
+ "epoch": 0.01,
37
+ "learning_rate": 1.9999449052303777e-05,
38
+ "loss": 2.7784,
39
+ "step": 25
40
+ },
41
+ {
42
+ "epoch": 0.01,
43
+ "learning_rate": 1.9999206638522888e-05,
44
+ "loss": 2.8161,
45
+ "step": 30
46
+ },
47
+ {
48
+ "epoch": 0.01,
49
+ "learning_rate": 1.9998920152034595e-05,
50
+ "loss": 2.6752,
51
+ "step": 35
52
+ },
53
+ {
54
+ "epoch": 0.02,
55
+ "learning_rate": 1.9998589594101623e-05,
56
+ "loss": 2.6403,
57
+ "step": 40
58
+ },
59
+ {
60
+ "epoch": 0.02,
61
+ "learning_rate": 1.9998214966180948e-05,
62
+ "loss": 2.6433,
63
+ "step": 45
64
+ },
65
+ {
66
+ "epoch": 0.02,
67
+ "learning_rate": 1.999779626992378e-05,
68
+ "loss": 2.5787,
69
+ "step": 50
70
+ },
71
+ {
72
+ "epoch": 0.02,
73
+ "learning_rate": 1.9997333507175583e-05,
74
+ "loss": 2.6812,
75
+ "step": 55
76
+ },
77
+ {
78
+ "epoch": 0.02,
79
+ "learning_rate": 1.9996826679976033e-05,
80
+ "loss": 2.7287,
81
+ "step": 60
82
+ },
83
+ {
84
+ "epoch": 0.03,
85
+ "learning_rate": 1.9996275790559037e-05,
86
+ "loss": 2.6465,
87
+ "step": 65
88
+ },
89
+ {
90
+ "epoch": 0.03,
91
+ "learning_rate": 1.99956808413527e-05,
92
+ "loss": 2.5268,
93
+ "step": 70
94
+ },
95
+ {
96
+ "epoch": 0.03,
97
+ "learning_rate": 1.999504183497934e-05,
98
+ "loss": 2.6893,
99
+ "step": 75
100
+ },
101
+ {
102
+ "epoch": 0.03,
103
+ "learning_rate": 1.9994358774255444e-05,
104
+ "loss": 2.6274,
105
+ "step": 80
106
+ },
107
+ {
108
+ "epoch": 0.03,
109
+ "learning_rate": 1.9993631662191696e-05,
110
+ "loss": 2.6232,
111
+ "step": 85
112
+ },
113
+ {
114
+ "epoch": 0.04,
115
+ "learning_rate": 1.9992860501992924e-05,
116
+ "loss": 2.7188,
117
+ "step": 90
118
+ },
119
+ {
120
+ "epoch": 0.04,
121
+ "learning_rate": 1.9992045297058108e-05,
122
+ "loss": 2.5388,
123
+ "step": 95
124
+ },
125
+ {
126
+ "epoch": 0.04,
127
+ "learning_rate": 1.9991186050980366e-05,
128
+ "loss": 2.6793,
129
+ "step": 100
130
+ },
131
+ {
132
+ "epoch": 0.04,
133
+ "learning_rate": 1.9990282767546926e-05,
134
+ "loss": 2.5523,
135
+ "step": 105
136
+ },
137
+ {
138
+ "epoch": 0.04,
139
+ "learning_rate": 1.998933545073912e-05,
140
+ "loss": 2.5763,
141
+ "step": 110
142
+ },
143
+ {
144
+ "epoch": 0.05,
145
+ "learning_rate": 1.998834410473236e-05,
146
+ "loss": 2.6447,
147
+ "step": 115
148
+ },
149
+ {
150
+ "epoch": 0.05,
151
+ "learning_rate": 1.998730873389612e-05,
152
+ "loss": 2.5579,
153
+ "step": 120
154
+ },
155
+ {
156
+ "epoch": 0.05,
157
+ "learning_rate": 1.998622934279393e-05,
158
+ "loss": 2.5884,
159
+ "step": 125
160
+ },
161
+ {
162
+ "epoch": 0.05,
163
+ "learning_rate": 1.9985105936183327e-05,
164
+ "loss": 2.5051,
165
+ "step": 130
166
+ },
167
+ {
168
+ "epoch": 0.05,
169
+ "learning_rate": 1.9983938519015868e-05,
170
+ "loss": 2.6014,
171
+ "step": 135
172
+ },
173
+ {
174
+ "epoch": 0.06,
175
+ "learning_rate": 1.998272709643708e-05,
176
+ "loss": 2.5878,
177
+ "step": 140
178
+ },
179
+ {
180
+ "epoch": 0.06,
181
+ "learning_rate": 1.998147167378645e-05,
182
+ "loss": 2.6642,
183
+ "step": 145
184
+ },
185
+ {
186
+ "epoch": 0.06,
187
+ "learning_rate": 1.998017225659742e-05,
188
+ "loss": 2.5077,
189
+ "step": 150
190
+ },
191
+ {
192
+ "epoch": 0.06,
193
+ "learning_rate": 1.9978828850597312e-05,
194
+ "loss": 2.5921,
195
+ "step": 155
196
+ },
197
+ {
198
+ "epoch": 0.06,
199
+ "learning_rate": 1.9977441461707358e-05,
200
+ "loss": 2.5577,
201
+ "step": 160
202
+ },
203
+ {
204
+ "epoch": 0.07,
205
+ "learning_rate": 1.9976010096042634e-05,
206
+ "loss": 2.524,
207
+ "step": 165
208
+ },
209
+ {
210
+ "epoch": 0.07,
211
+ "learning_rate": 1.9974534759912068e-05,
212
+ "loss": 2.5708,
213
+ "step": 170
214
+ },
215
+ {
216
+ "epoch": 0.07,
217
+ "learning_rate": 1.997301545981837e-05,
218
+ "loss": 2.5578,
219
+ "step": 175
220
+ },
221
+ {
222
+ "epoch": 0.07,
223
+ "learning_rate": 1.9971452202458048e-05,
224
+ "loss": 2.6874,
225
+ "step": 180
226
+ },
227
+ {
228
+ "epoch": 0.07,
229
+ "learning_rate": 1.9969844994721338e-05,
230
+ "loss": 2.535,
231
+ "step": 185
232
+ },
233
+ {
234
+ "epoch": 0.08,
235
+ "learning_rate": 1.996819384369221e-05,
236
+ "loss": 2.5816,
237
+ "step": 190
238
+ },
239
+ {
240
+ "epoch": 0.08,
241
+ "learning_rate": 1.9966498756648305e-05,
242
+ "loss": 2.6225,
243
+ "step": 195
244
+ },
245
+ {
246
+ "epoch": 0.08,
247
+ "learning_rate": 1.9964759741060926e-05,
248
+ "loss": 2.5387,
249
+ "step": 200
250
+ },
251
+ {
252
+ "epoch": 0.08,
253
+ "learning_rate": 1.9962976804594993e-05,
254
+ "loss": 2.524,
255
+ "step": 205
256
+ },
257
+ {
258
+ "epoch": 0.08,
259
+ "learning_rate": 1.996114995510901e-05,
260
+ "loss": 2.488,
261
+ "step": 210
262
+ },
263
+ {
264
+ "epoch": 0.09,
265
+ "learning_rate": 1.9959279200655044e-05,
266
+ "loss": 2.5824,
267
+ "step": 215
268
+ },
269
+ {
270
+ "epoch": 0.09,
271
+ "learning_rate": 1.9957364549478663e-05,
272
+ "loss": 2.5828,
273
+ "step": 220
274
+ },
275
+ {
276
+ "epoch": 0.09,
277
+ "learning_rate": 1.9955406010018928e-05,
278
+ "loss": 2.5137,
279
+ "step": 225
280
+ },
281
+ {
282
+ "epoch": 0.09,
283
+ "learning_rate": 1.9953403590908334e-05,
284
+ "loss": 2.5539,
285
+ "step": 230
286
+ },
287
+ {
288
+ "epoch": 0.09,
289
+ "learning_rate": 1.995135730097278e-05,
290
+ "loss": 2.6099,
291
+ "step": 235
292
+ },
293
+ {
294
+ "epoch": 0.1,
295
+ "learning_rate": 1.994926714923155e-05,
296
+ "loss": 2.5309,
297
+ "step": 240
298
+ },
299
+ {
300
+ "epoch": 0.1,
301
+ "learning_rate": 1.9947133144897225e-05,
302
+ "loss": 2.5152,
303
+ "step": 245
304
+ },
305
+ {
306
+ "epoch": 0.1,
307
+ "learning_rate": 1.9944955297375693e-05,
308
+ "loss": 2.4738,
309
+ "step": 250
310
+ },
311
+ {
312
+ "epoch": 0.1,
313
+ "learning_rate": 1.9942733616266076e-05,
314
+ "loss": 2.5173,
315
+ "step": 255
316
+ },
317
+ {
318
+ "epoch": 0.1,
319
+ "learning_rate": 1.99404681113607e-05,
320
+ "loss": 2.578,
321
+ "step": 260
322
+ },
323
+ {
324
+ "epoch": 0.11,
325
+ "learning_rate": 1.993815879264506e-05,
326
+ "loss": 2.5089,
327
+ "step": 265
328
+ },
329
+ {
330
+ "epoch": 0.11,
331
+ "learning_rate": 1.9935805670297744e-05,
332
+ "loss": 2.5872,
333
+ "step": 270
334
+ },
335
+ {
336
+ "epoch": 0.11,
337
+ "learning_rate": 1.993340875469043e-05,
338
+ "loss": 2.5882,
339
+ "step": 275
340
+ },
341
+ {
342
+ "epoch": 0.11,
343
+ "learning_rate": 1.993096805638781e-05,
344
+ "loss": 2.5789,
345
+ "step": 280
346
+ },
347
+ {
348
+ "epoch": 0.11,
349
+ "learning_rate": 1.9928483586147553e-05,
350
+ "loss": 2.488,
351
+ "step": 285
352
+ },
353
+ {
354
+ "epoch": 0.12,
355
+ "learning_rate": 1.9925955354920265e-05,
356
+ "loss": 2.617,
357
+ "step": 290
358
+ },
359
+ {
360
+ "epoch": 0.12,
361
+ "learning_rate": 1.992338337384943e-05,
362
+ "loss": 2.5752,
363
+ "step": 295
364
+ },
365
+ {
366
+ "epoch": 0.12,
367
+ "learning_rate": 1.992076765427136e-05,
368
+ "loss": 2.5529,
369
+ "step": 300
370
+ },
371
+ {
372
+ "epoch": 0.12,
373
+ "learning_rate": 1.9918108207715156e-05,
374
+ "loss": 2.4942,
375
+ "step": 305
376
+ },
377
+ {
378
+ "epoch": 0.12,
379
+ "learning_rate": 1.991540504590265e-05,
380
+ "loss": 2.5456,
381
+ "step": 310
382
+ },
383
+ {
384
+ "epoch": 0.13,
385
+ "learning_rate": 1.991265818074835e-05,
386
+ "loss": 2.5957,
387
+ "step": 315
388
+ },
389
+ {
390
+ "epoch": 0.13,
391
+ "learning_rate": 1.99098676243594e-05,
392
+ "loss": 2.5699,
393
+ "step": 320
394
+ },
395
+ {
396
+ "epoch": 0.13,
397
+ "learning_rate": 1.9907033389035512e-05,
398
+ "loss": 2.5544,
399
+ "step": 325
400
+ },
401
+ {
402
+ "epoch": 0.13,
403
+ "learning_rate": 1.9904155487268912e-05,
404
+ "loss": 2.538,
405
+ "step": 330
406
+ },
407
+ {
408
+ "epoch": 0.13,
409
+ "learning_rate": 1.990123393174431e-05,
410
+ "loss": 2.6055,
411
+ "step": 335
412
+ },
413
+ {
414
+ "epoch": 0.14,
415
+ "learning_rate": 1.9898268735338807e-05,
416
+ "loss": 2.5846,
417
+ "step": 340
418
+ },
419
+ {
420
+ "epoch": 0.14,
421
+ "learning_rate": 1.9895259911121866e-05,
422
+ "loss": 2.5405,
423
+ "step": 345
424
+ },
425
+ {
426
+ "epoch": 0.14,
427
+ "learning_rate": 1.9892207472355243e-05,
428
+ "loss": 2.5162,
429
+ "step": 350
430
+ },
431
+ {
432
+ "epoch": 0.14,
433
+ "learning_rate": 1.988911143249292e-05,
434
+ "loss": 2.5484,
435
+ "step": 355
436
+ },
437
+ {
438
+ "epoch": 0.14,
439
+ "learning_rate": 1.9885971805181083e-05,
440
+ "loss": 2.671,
441
+ "step": 360
442
+ },
443
+ {
444
+ "epoch": 0.15,
445
+ "learning_rate": 1.9882788604258e-05,
446
+ "loss": 2.5696,
447
+ "step": 365
448
+ },
449
+ {
450
+ "epoch": 0.15,
451
+ "learning_rate": 1.987956184375402e-05,
452
+ "loss": 2.5021,
453
+ "step": 370
454
+ },
455
+ {
456
+ "epoch": 0.15,
457
+ "learning_rate": 1.9876291537891482e-05,
458
+ "loss": 2.5644,
459
+ "step": 375
460
+ },
461
+ {
462
+ "epoch": 0.15,
463
+ "learning_rate": 1.9872977701084645e-05,
464
+ "loss": 2.5386,
465
+ "step": 380
466
+ },
467
+ {
468
+ "epoch": 0.15,
469
+ "learning_rate": 1.9869620347939652e-05,
470
+ "loss": 2.554,
471
+ "step": 385
472
+ },
473
+ {
474
+ "epoch": 0.16,
475
+ "learning_rate": 1.9866219493254433e-05,
476
+ "loss": 2.4798,
477
+ "step": 390
478
+ },
479
+ {
480
+ "epoch": 0.16,
481
+ "learning_rate": 1.986277515201867e-05,
482
+ "loss": 2.538,
483
+ "step": 395
484
+ },
485
+ {
486
+ "epoch": 0.16,
487
+ "learning_rate": 1.9859287339413714e-05,
488
+ "loss": 2.5041,
489
+ "step": 400
490
+ },
491
+ {
492
+ "epoch": 0.16,
493
+ "learning_rate": 1.9855756070812514e-05,
494
+ "loss": 2.7067,
495
+ "step": 405
496
+ },
497
+ {
498
+ "epoch": 0.16,
499
+ "learning_rate": 1.9852181361779563e-05,
500
+ "loss": 2.5054,
501
+ "step": 410
502
+ },
503
+ {
504
+ "epoch": 0.17,
505
+ "learning_rate": 1.984856322807082e-05,
506
+ "loss": 2.4815,
507
+ "step": 415
508
+ },
509
+ {
510
+ "epoch": 0.17,
511
+ "learning_rate": 1.9844901685633648e-05,
512
+ "loss": 2.5885,
513
+ "step": 420
514
+ },
515
+ {
516
+ "epoch": 0.17,
517
+ "learning_rate": 1.9841196750606735e-05,
518
+ "loss": 2.521,
519
+ "step": 425
520
+ },
521
+ {
522
+ "epoch": 0.17,
523
+ "learning_rate": 1.9837448439320027e-05,
524
+ "loss": 2.4937,
525
+ "step": 430
526
+ },
527
+ {
528
+ "epoch": 0.17,
529
+ "learning_rate": 1.983365676829466e-05,
530
+ "loss": 2.5877,
531
+ "step": 435
532
+ },
533
+ {
534
+ "epoch": 0.18,
535
+ "learning_rate": 1.9829821754242885e-05,
536
+ "loss": 2.5761,
537
+ "step": 440
538
+ },
539
+ {
540
+ "epoch": 0.18,
541
+ "learning_rate": 1.9825943414067974e-05,
542
+ "loss": 2.4917,
543
+ "step": 445
544
+ },
545
+ {
546
+ "epoch": 0.18,
547
+ "learning_rate": 1.9822021764864194e-05,
548
+ "loss": 2.5434,
549
+ "step": 450
550
+ },
551
+ {
552
+ "epoch": 0.18,
553
+ "learning_rate": 1.9818056823916675e-05,
554
+ "loss": 2.5906,
555
+ "step": 455
556
+ },
557
+ {
558
+ "epoch": 0.18,
559
+ "learning_rate": 1.9814048608701374e-05,
560
+ "loss": 2.5508,
561
+ "step": 460
562
+ },
563
+ {
564
+ "epoch": 0.19,
565
+ "learning_rate": 1.980999713688499e-05,
566
+ "loss": 2.6005,
567
+ "step": 465
568
+ },
569
+ {
570
+ "epoch": 0.19,
571
+ "learning_rate": 1.980590242632486e-05,
572
+ "loss": 2.523,
573
+ "step": 470
574
+ },
575
+ {
576
+ "epoch": 0.19,
577
+ "learning_rate": 1.9801764495068923e-05,
578
+ "loss": 2.5453,
579
+ "step": 475
580
+ },
581
+ {
582
+ "epoch": 0.19,
583
+ "learning_rate": 1.979758336135561e-05,
584
+ "loss": 2.6426,
585
+ "step": 480
586
+ },
587
+ {
588
+ "epoch": 0.19,
589
+ "learning_rate": 1.9793359043613768e-05,
590
+ "loss": 2.5454,
591
+ "step": 485
592
+ },
593
+ {
594
+ "epoch": 0.2,
595
+ "learning_rate": 1.9789091560462587e-05,
596
+ "loss": 2.6071,
597
+ "step": 490
598
+ },
599
+ {
600
+ "epoch": 0.2,
601
+ "learning_rate": 1.9784780930711514e-05,
602
+ "loss": 2.5913,
603
+ "step": 495
604
+ },
605
+ {
606
+ "epoch": 0.2,
607
+ "learning_rate": 1.9780427173360165e-05,
608
+ "loss": 2.5082,
609
+ "step": 500
610
+ },
611
+ {
612
+ "epoch": 0.2,
613
+ "learning_rate": 1.977603030759825e-05,
614
+ "loss": 2.565,
615
+ "step": 505
616
+ },
617
+ {
618
+ "epoch": 0.2,
619
+ "learning_rate": 1.977159035280549e-05,
620
+ "loss": 2.6365,
621
+ "step": 510
622
+ },
623
+ {
624
+ "epoch": 0.21,
625
+ "learning_rate": 1.9767107328551515e-05,
626
+ "loss": 2.6872,
627
+ "step": 515
628
+ },
629
+ {
630
+ "epoch": 0.21,
631
+ "learning_rate": 1.9762581254595797e-05,
632
+ "loss": 2.6222,
633
+ "step": 520
634
+ },
635
+ {
636
+ "epoch": 0.21,
637
+ "learning_rate": 1.975801215088755e-05,
638
+ "loss": 2.5633,
639
+ "step": 525
640
+ },
641
+ {
642
+ "epoch": 0.21,
643
+ "learning_rate": 1.9753400037565653e-05,
644
+ "loss": 2.6579,
645
+ "step": 530
646
+ },
647
+ {
648
+ "epoch": 0.21,
649
+ "learning_rate": 1.9748744934958548e-05,
650
+ "loss": 2.5572,
651
+ "step": 535
652
+ },
653
+ {
654
+ "epoch": 0.22,
655
+ "learning_rate": 1.974404686358416e-05,
656
+ "loss": 2.5845,
657
+ "step": 540
658
+ },
659
+ {
660
+ "epoch": 0.22,
661
+ "learning_rate": 1.97393058441498e-05,
662
+ "loss": 2.6484,
663
+ "step": 545
664
+ },
665
+ {
666
+ "epoch": 0.22,
667
+ "learning_rate": 1.973452189755209e-05,
668
+ "loss": 2.5473,
669
+ "step": 550
670
+ },
671
+ {
672
+ "epoch": 0.22,
673
+ "learning_rate": 1.9729695044876847e-05,
674
+ "loss": 2.5995,
675
+ "step": 555
676
+ },
677
+ {
678
+ "epoch": 0.22,
679
+ "learning_rate": 1.9724825307399003e-05,
680
+ "loss": 2.5473,
681
+ "step": 560
682
+ },
683
+ {
684
+ "epoch": 0.23,
685
+ "learning_rate": 1.971991270658252e-05,
686
+ "loss": 2.5798,
687
+ "step": 565
688
+ },
689
+ {
690
+ "epoch": 0.23,
691
+ "learning_rate": 1.971495726408027e-05,
692
+ "loss": 2.655,
693
+ "step": 570
694
+ },
695
+ {
696
+ "epoch": 0.23,
697
+ "learning_rate": 1.970995900173397e-05,
698
+ "loss": 2.6904,
699
+ "step": 575
700
+ },
701
+ {
702
+ "epoch": 0.23,
703
+ "learning_rate": 1.9704917941574053e-05,
704
+ "loss": 2.6973,
705
+ "step": 580
706
+ },
707
+ {
708
+ "epoch": 0.23,
709
+ "learning_rate": 1.969983410581961e-05,
710
+ "loss": 2.6518,
711
+ "step": 585
712
+ },
713
+ {
714
+ "epoch": 0.24,
715
+ "learning_rate": 1.969470751687825e-05,
716
+ "loss": 2.5849,
717
+ "step": 590
718
+ },
719
+ {
720
+ "epoch": 0.24,
721
+ "learning_rate": 1.9689538197346035e-05,
722
+ "loss": 2.4715,
723
+ "step": 595
724
+ },
725
+ {
726
+ "epoch": 0.24,
727
+ "learning_rate": 1.9684326170007365e-05,
728
+ "loss": 2.7246,
729
+ "step": 600
730
+ },
731
+ {
732
+ "epoch": 0.24,
733
+ "learning_rate": 1.9679071457834874e-05,
734
+ "loss": 2.5482,
735
+ "step": 605
736
+ },
737
+ {
738
+ "epoch": 0.24,
739
+ "learning_rate": 1.967377408398934e-05,
740
+ "loss": 2.6084,
741
+ "step": 610
742
+ },
743
+ {
744
+ "epoch": 0.25,
745
+ "learning_rate": 1.966843407181958e-05,
746
+ "loss": 2.5144,
747
+ "step": 615
748
+ },
749
+ {
750
+ "epoch": 0.25,
751
+ "learning_rate": 1.9663051444862335e-05,
752
+ "loss": 2.7663,
753
+ "step": 620
754
+ },
755
+ {
756
+ "epoch": 0.25,
757
+ "learning_rate": 1.9657626226842187e-05,
758
+ "loss": 2.5697,
759
+ "step": 625
760
+ },
761
+ {
762
+ "epoch": 0.25,
763
+ "learning_rate": 1.9652158441671435e-05,
764
+ "loss": 2.6379,
765
+ "step": 630
766
+ },
767
+ {
768
+ "epoch": 0.25,
769
+ "learning_rate": 1.964664811345e-05,
770
+ "loss": 2.6784,
771
+ "step": 635
772
+ },
773
+ {
774
+ "epoch": 0.26,
775
+ "learning_rate": 1.964109526646532e-05,
776
+ "loss": 2.6936,
777
+ "step": 640
778
+ },
779
+ {
780
+ "epoch": 0.26,
781
+ "learning_rate": 1.963549992519223e-05,
782
+ "loss": 2.5672,
783
+ "step": 645
784
+ },
785
+ {
786
+ "epoch": 0.26,
787
+ "learning_rate": 1.962986211429288e-05,
788
+ "loss": 2.62,
789
+ "step": 650
790
+ },
791
+ {
792
+ "epoch": 0.26,
793
+ "learning_rate": 1.9624181858616593e-05,
794
+ "loss": 2.7293,
795
+ "step": 655
796
+ },
797
+ {
798
+ "epoch": 0.26,
799
+ "learning_rate": 1.9618459183199782e-05,
800
+ "loss": 2.6636,
801
+ "step": 660
802
+ },
803
+ {
804
+ "epoch": 0.27,
805
+ "learning_rate": 1.961269411326583e-05,
806
+ "loss": 2.5106,
807
+ "step": 665
808
+ },
809
+ {
810
+ "epoch": 0.27,
811
+ "learning_rate": 1.9606886674224977e-05,
812
+ "loss": 2.6878,
813
+ "step": 670
814
+ },
815
+ {
816
+ "epoch": 0.27,
817
+ "learning_rate": 1.960103689167421e-05,
818
+ "loss": 2.693,
819
+ "step": 675
820
+ },
821
+ {
822
+ "epoch": 0.27,
823
+ "learning_rate": 1.9595144791397142e-05,
824
+ "loss": 2.6562,
825
+ "step": 680
826
+ },
827
+ {
828
+ "epoch": 0.27,
829
+ "learning_rate": 1.9589210399363925e-05,
830
+ "loss": 2.6269,
831
+ "step": 685
832
+ },
833
+ {
834
+ "epoch": 0.28,
835
+ "learning_rate": 1.95832337417311e-05,
836
+ "loss": 2.6459,
837
+ "step": 690
838
+ },
839
+ {
840
+ "epoch": 0.28,
841
+ "learning_rate": 1.9577214844841515e-05,
842
+ "loss": 2.5765,
843
+ "step": 695
844
+ },
845
+ {
846
+ "epoch": 0.28,
847
+ "learning_rate": 1.957115373522417e-05,
848
+ "loss": 2.6113,
849
+ "step": 700
850
+ },
851
+ {
852
+ "epoch": 0.28,
853
+ "learning_rate": 1.956505043959414e-05,
854
+ "loss": 2.6641,
855
+ "step": 705
856
+ },
857
+ {
858
+ "epoch": 0.28,
859
+ "learning_rate": 1.955890498485244e-05,
860
+ "loss": 2.6493,
861
+ "step": 710
862
+ },
863
+ {
864
+ "epoch": 0.29,
865
+ "learning_rate": 1.9552717398085898e-05,
866
+ "loss": 2.6135,
867
+ "step": 715
868
+ },
869
+ {
870
+ "epoch": 0.29,
871
+ "learning_rate": 1.954648770656705e-05,
872
+ "loss": 2.6182,
873
+ "step": 720
874
+ },
875
+ {
876
+ "epoch": 0.29,
877
+ "learning_rate": 1.954021593775401e-05,
878
+ "loss": 2.6487,
879
+ "step": 725
880
+ },
881
+ {
882
+ "epoch": 0.29,
883
+ "learning_rate": 1.9533902119290352e-05,
884
+ "loss": 2.5927,
885
+ "step": 730
886
+ },
887
+ {
888
+ "epoch": 0.29,
889
+ "learning_rate": 1.952754627900499e-05,
890
+ "loss": 2.5825,
891
+ "step": 735
892
+ },
893
+ {
894
+ "epoch": 0.3,
895
+ "learning_rate": 1.9521148444912065e-05,
896
+ "loss": 2.7193,
897
+ "step": 740
898
+ },
899
+ {
900
+ "epoch": 0.3,
901
+ "learning_rate": 1.9514708645210793e-05,
902
+ "loss": 2.5802,
903
+ "step": 745
904
+ },
905
+ {
906
+ "epoch": 0.3,
907
+ "learning_rate": 1.9508226908285368e-05,
908
+ "loss": 2.6628,
909
+ "step": 750
910
+ },
911
+ {
912
+ "epoch": 0.3,
913
+ "learning_rate": 1.950170326270483e-05,
914
+ "loss": 2.5847,
915
+ "step": 755
916
+ },
917
+ {
918
+ "epoch": 0.3,
919
+ "learning_rate": 1.9495137737222925e-05,
920
+ "loss": 2.6594,
921
+ "step": 760
922
+ },
923
+ {
924
+ "epoch": 0.31,
925
+ "learning_rate": 1.9488530360778007e-05,
926
+ "loss": 2.6096,
927
+ "step": 765
928
+ },
929
+ {
930
+ "epoch": 0.31,
931
+ "learning_rate": 1.948188116249287e-05,
932
+ "loss": 2.6378,
933
+ "step": 770
934
+ },
935
+ {
936
+ "epoch": 0.31,
937
+ "learning_rate": 1.9475190171674675e-05,
938
+ "loss": 2.5984,
939
+ "step": 775
940
+ },
941
+ {
942
+ "epoch": 0.31,
943
+ "learning_rate": 1.9468457417814753e-05,
944
+ "loss": 2.6437,
945
+ "step": 780
946
+ },
947
+ {
948
+ "epoch": 0.31,
949
+ "learning_rate": 1.9461682930588534e-05,
950
+ "loss": 2.6522,
951
+ "step": 785
952
+ },
953
+ {
954
+ "epoch": 0.32,
955
+ "learning_rate": 1.9454866739855384e-05,
956
+ "loss": 2.6242,
957
+ "step": 790
958
+ },
959
+ {
960
+ "epoch": 0.32,
961
+ "learning_rate": 1.944800887565849e-05,
962
+ "loss": 2.5961,
963
+ "step": 795
964
+ },
965
+ {
966
+ "epoch": 0.32,
967
+ "learning_rate": 1.9441109368224704e-05,
968
+ "loss": 2.6365,
969
+ "step": 800
970
+ },
971
+ {
972
+ "epoch": 0.32,
973
+ "learning_rate": 1.9434168247964447e-05,
974
+ "loss": 2.5674,
975
+ "step": 805
976
+ },
977
+ {
978
+ "epoch": 0.32,
979
+ "learning_rate": 1.9427185545471537e-05,
980
+ "loss": 2.6369,
981
+ "step": 810
982
+ },
983
+ {
984
+ "epoch": 0.33,
985
+ "learning_rate": 1.9420161291523076e-05,
986
+ "loss": 2.5763,
987
+ "step": 815
988
+ },
989
+ {
990
+ "epoch": 0.33,
991
+ "learning_rate": 1.941309551707931e-05,
992
+ "loss": 2.5651,
993
+ "step": 820
994
+ },
995
+ {
996
+ "epoch": 0.33,
997
+ "learning_rate": 1.9405988253283492e-05,
998
+ "loss": 2.6223,
999
+ "step": 825
1000
+ },
1001
+ {
1002
+ "epoch": 0.33,
1003
+ "learning_rate": 1.939883953146174e-05,
1004
+ "loss": 2.6616,
1005
+ "step": 830
1006
+ },
1007
+ {
1008
+ "epoch": 0.33,
1009
+ "learning_rate": 1.939164938312291e-05,
1010
+ "loss": 2.6496,
1011
+ "step": 835
1012
+ },
1013
+ {
1014
+ "epoch": 0.34,
1015
+ "learning_rate": 1.9384417839958443e-05,
1016
+ "loss": 2.7161,
1017
+ "step": 840
1018
+ },
1019
+ {
1020
+ "epoch": 0.34,
1021
+ "learning_rate": 1.937714493384224e-05,
1022
+ "loss": 2.6047,
1023
+ "step": 845
1024
+ },
1025
+ {
1026
+ "epoch": 0.34,
1027
+ "learning_rate": 1.936983069683051e-05,
1028
+ "loss": 2.5978,
1029
+ "step": 850
1030
+ },
1031
+ {
1032
+ "epoch": 0.34,
1033
+ "learning_rate": 1.936247516116163e-05,
1034
+ "loss": 2.6331,
1035
+ "step": 855
1036
+ },
1037
+ {
1038
+ "epoch": 0.34,
1039
+ "learning_rate": 1.935507835925601e-05,
1040
+ "loss": 2.5679,
1041
+ "step": 860
1042
+ },
1043
+ {
1044
+ "epoch": 0.35,
1045
+ "learning_rate": 1.934764032371595e-05,
1046
+ "loss": 2.6647,
1047
+ "step": 865
1048
+ },
1049
+ {
1050
+ "epoch": 0.35,
1051
+ "learning_rate": 1.9340161087325483e-05,
1052
+ "loss": 2.6142,
1053
+ "step": 870
1054
+ },
1055
+ {
1056
+ "epoch": 0.35,
1057
+ "learning_rate": 1.9332640683050243e-05,
1058
+ "loss": 2.6181,
1059
+ "step": 875
1060
+ },
1061
+ {
1062
+ "epoch": 0.35,
1063
+ "learning_rate": 1.932507914403732e-05,
1064
+ "loss": 2.6105,
1065
+ "step": 880
1066
+ },
1067
+ {
1068
+ "epoch": 0.35,
1069
+ "learning_rate": 1.9317476503615108e-05,
1070
+ "loss": 2.6415,
1071
+ "step": 885
1072
+ },
1073
+ {
1074
+ "epoch": 0.36,
1075
+ "learning_rate": 1.9309832795293156e-05,
1076
+ "loss": 2.6736,
1077
+ "step": 890
1078
+ },
1079
+ {
1080
+ "epoch": 0.36,
1081
+ "learning_rate": 1.930214805276204e-05,
1082
+ "loss": 2.6507,
1083
+ "step": 895
1084
+ },
1085
+ {
1086
+ "epoch": 0.36,
1087
+ "learning_rate": 1.9294422309893177e-05,
1088
+ "loss": 2.6771,
1089
+ "step": 900
1090
+ },
1091
+ {
1092
+ "epoch": 0.36,
1093
+ "learning_rate": 1.9286655600738707e-05,
1094
+ "loss": 2.6763,
1095
+ "step": 905
1096
+ },
1097
+ {
1098
+ "epoch": 0.36,
1099
+ "learning_rate": 1.9278847959531348e-05,
1100
+ "loss": 2.7135,
1101
+ "step": 910
1102
+ },
1103
+ {
1104
+ "epoch": 0.37,
1105
+ "learning_rate": 1.927099942068421e-05,
1106
+ "loss": 2.6166,
1107
+ "step": 915
1108
+ },
1109
+ {
1110
+ "epoch": 0.37,
1111
+ "learning_rate": 1.9263110018790673e-05,
1112
+ "loss": 2.6238,
1113
+ "step": 920
1114
+ },
1115
+ {
1116
+ "epoch": 0.37,
1117
+ "learning_rate": 1.9255179788624233e-05,
1118
+ "loss": 2.6424,
1119
+ "step": 925
1120
+ },
1121
+ {
1122
+ "epoch": 0.37,
1123
+ "learning_rate": 1.9247208765138325e-05,
1124
+ "loss": 2.6023,
1125
+ "step": 930
1126
+ },
1127
+ {
1128
+ "epoch": 0.37,
1129
+ "learning_rate": 1.9239196983466204e-05,
1130
+ "loss": 2.58,
1131
+ "step": 935
1132
+ },
1133
+ {
1134
+ "epoch": 0.38,
1135
+ "learning_rate": 1.9231144478920756e-05,
1136
+ "loss": 2.6173,
1137
+ "step": 940
1138
+ },
1139
+ {
1140
+ "epoch": 0.38,
1141
+ "learning_rate": 1.9223051286994368e-05,
1142
+ "loss": 2.628,
1143
+ "step": 945
1144
+ },
1145
+ {
1146
+ "epoch": 0.38,
1147
+ "learning_rate": 1.9214917443358753e-05,
1148
+ "loss": 2.6868,
1149
+ "step": 950
1150
+ },
1151
+ {
1152
+ "epoch": 0.38,
1153
+ "learning_rate": 1.9206742983864813e-05,
1154
+ "loss": 2.6342,
1155
+ "step": 955
1156
+ },
1157
+ {
1158
+ "epoch": 0.38,
1159
+ "learning_rate": 1.9198527944542462e-05,
1160
+ "loss": 2.5934,
1161
+ "step": 960
1162
+ },
1163
+ {
1164
+ "epoch": 0.39,
1165
+ "learning_rate": 1.919027236160047e-05,
1166
+ "loss": 2.6354,
1167
+ "step": 965
1168
+ },
1169
+ {
1170
+ "epoch": 0.39,
1171
+ "learning_rate": 1.9181976271426315e-05,
1172
+ "loss": 2.5955,
1173
+ "step": 970
1174
+ },
1175
+ {
1176
+ "epoch": 0.39,
1177
+ "learning_rate": 1.9173639710586015e-05,
1178
+ "loss": 2.6134,
1179
+ "step": 975
1180
+ },
1181
+ {
1182
+ "epoch": 0.39,
1183
+ "learning_rate": 1.9165262715823966e-05,
1184
+ "loss": 2.7,
1185
+ "step": 980
1186
+ },
1187
+ {
1188
+ "epoch": 0.39,
1189
+ "learning_rate": 1.915684532406278e-05,
1190
+ "loss": 2.7197,
1191
+ "step": 985
1192
+ },
1193
+ {
1194
+ "epoch": 0.4,
1195
+ "learning_rate": 1.9148387572403123e-05,
1196
+ "loss": 2.4881,
1197
+ "step": 990
1198
+ },
1199
+ {
1200
+ "epoch": 0.4,
1201
+ "learning_rate": 1.913988949812356e-05,
1202
+ "loss": 2.6999,
1203
+ "step": 995
1204
+ },
1205
+ {
1206
+ "epoch": 0.4,
1207
+ "learning_rate": 1.9131351138680368e-05,
1208
+ "loss": 2.5981,
1209
+ "step": 1000
1210
+ }
1211
+ ],
1212
+ "logging_steps": 5,
1213
+ "max_steps": 7482,
1214
+ "num_input_tokens_seen": 0,
1215
+ "num_train_epochs": 3,
1216
+ "save_steps": 100,
1217
+ "total_flos": 1.338041094340608e+16,
1218
+ "train_batch_size": 4,
1219
+ "trial_name": null,
1220
+ "trial_params": null
1221
+ }
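The trainer_state.json added above is plain JSON, so the logged loss curve can be read straight from its `log_history` list. A minimal sketch, assuming the checkpoint folder has been downloaded locally (the local path below is hypothetical):

```python
# Hypothetical sketch: read the loss curve logged in trainer_state.json.
# The local path "checkpoint-1000" is an assumption; adjust it to wherever
# the checkpoint folder was downloaded.
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry holds "epoch", "learning_rate", "loss", and "step".
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(points[-1])  # e.g. (1000, 2.5981) for this checkpoint
```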
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c481a87016b183d9406558b8812ee05f00d60a3c721055833e3cdda34cf9bb26
+ size 4920
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/README.md ADDED
@@ -0,0 +1,204 @@
1
+ ---
2
+ library_name: peft
3
+ base_model: Qwen/Qwen1.5-0.5B-Chat
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+
201
+
202
+ ### Framework versions
203
+
204
+ - PEFT 0.8.2
checkpoint-1100/adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
+ }
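This adapter_config.json describes a LoRA adapter (r=8, alpha=16, targeting q_proj/v_proj) on top of Qwen/Qwen1.5-0.5B-Chat. A minimal usage sketch with peft, assuming the checkpoint directory has been downloaded locally (the local path is hypothetical):

```python
# Hypothetical sketch: attach the LoRA adapter saved in this checkpoint to the
# base model. The local path "checkpoint-1100" is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")

# PeftModel reads adapter_config.json and adapter_model.safetensors from the folder.
model = PeftModel.from_pretrained(base, "checkpoint-1100")
model.eval()
```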
checkpoint-1100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9ea80b3c97be6fe2b7210aa1e1c9f7cc71203f1eeebf3252a7c8e277f61aa40
+ size 3158328
checkpoint-1100/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-1100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a906b82ea9f377db0e3fddcdbeb72d0ad945f37b639288eea867affedbdc8d3c
+ size 6372346
checkpoint-1100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f8a19ec786e929d2ec5cf00a109504e163e58f6a85cd2e06e12f302d94820d4
+ size 14244
checkpoint-1100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d48b5a7719cce1a4a890f8541be4f95f09fe9a3bb8b54924212960109bda920
+ size 1064
checkpoint-1100/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1100/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\n'}}{% endif %}{% endfor %}{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
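The chat_template field above is the ChatML-style template used by the Qwen1.5 chat models. A minimal sketch of rendering a prompt with it (model name as in this repo; the message content is hypothetical):

```python
# Hypothetical sketch: render a chat prompt with the template defined above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
messages = [{"role": "user", "content": "Hello"}]

# Produces "<|im_start|>system\n...<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```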
checkpoint-1100/trainer_state.json ADDED
@@ -0,0 +1,1341 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.4410585404971933,
5
+ "eval_steps": 500,
6
+ "global_step": 1100,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.0,
13
+ "learning_rate": 1.999997796189788e-05,
14
+ "loss": 2.7465,
15
+ "step": 5
16
+ },
17
+ {
18
+ "epoch": 0.0,
19
+ "learning_rate": 1.9999911847688657e-05,
20
+ "loss": 2.6877,
21
+ "step": 10
22
+ },
23
+ {
24
+ "epoch": 0.01,
25
+ "learning_rate": 1.999980165766374e-05,
26
+ "loss": 2.6848,
27
+ "step": 15
28
+ },
29
+ {
30
+ "epoch": 0.01,
31
+ "learning_rate": 1.9999647392308798e-05,
32
+ "loss": 2.7677,
33
+ "step": 20
34
+ },
35
+ {
36
+ "epoch": 0.01,
37
+ "learning_rate": 1.9999449052303777e-05,
38
+ "loss": 2.7784,
39
+ "step": 25
40
+ },
41
+ {
42
+ "epoch": 0.01,
43
+ "learning_rate": 1.9999206638522888e-05,
44
+ "loss": 2.8161,
45
+ "step": 30
46
+ },
47
+ {
48
+ "epoch": 0.01,
49
+ "learning_rate": 1.9998920152034595e-05,
50
+ "loss": 2.6752,
51
+ "step": 35
52
+ },
53
+ {
54
+ "epoch": 0.02,
55
+ "learning_rate": 1.9998589594101623e-05,
56
+ "loss": 2.6403,
57
+ "step": 40
58
+ },
59
+ {
60
+ "epoch": 0.02,
61
+ "learning_rate": 1.9998214966180948e-05,
62
+ "loss": 2.6433,
63
+ "step": 45
64
+ },
65
+ {
66
+ "epoch": 0.02,
67
+ "learning_rate": 1.999779626992378e-05,
68
+ "loss": 2.5787,
69
+ "step": 50
70
+ },
71
+ {
72
+ "epoch": 0.02,
73
+ "learning_rate": 1.9997333507175583e-05,
74
+ "loss": 2.6812,
75
+ "step": 55
76
+ },
77
+ {
78
+ "epoch": 0.02,
79
+ "learning_rate": 1.9996826679976033e-05,
80
+ "loss": 2.7287,
81
+ "step": 60
82
+ },
83
+ {
84
+ "epoch": 0.03,
85
+ "learning_rate": 1.9996275790559037e-05,
86
+ "loss": 2.6465,
87
+ "step": 65
88
+ },
89
+ {
90
+ "epoch": 0.03,
91
+ "learning_rate": 1.99956808413527e-05,
92
+ "loss": 2.5268,
93
+ "step": 70
94
+ },
95
+ {
96
+ "epoch": 0.03,
97
+ "learning_rate": 1.999504183497934e-05,
98
+ "loss": 2.6893,
99
+ "step": 75
100
+ },
101
+ {
102
+ "epoch": 0.03,
103
+ "learning_rate": 1.9994358774255444e-05,
104
+ "loss": 2.6274,
105
+ "step": 80
106
+ },
107
+ {
108
+ "epoch": 0.03,
109
+ "learning_rate": 1.9993631662191696e-05,
110
+ "loss": 2.6232,
111
+ "step": 85
112
+ },
113
+ {
114
+ "epoch": 0.04,
115
+ "learning_rate": 1.9992860501992924e-05,
116
+ "loss": 2.7188,
117
+ "step": 90
118
+ },
119
+ {
120
+ "epoch": 0.04,
121
+ "learning_rate": 1.9992045297058108e-05,
122
+ "loss": 2.5388,
123
+ "step": 95
124
+ },
125
+ {
126
+ "epoch": 0.04,
127
+ "learning_rate": 1.9991186050980366e-05,
128
+ "loss": 2.6793,
129
+ "step": 100
130
+ },
131
+ {
132
+ "epoch": 0.04,
133
+ "learning_rate": 1.9990282767546926e-05,
134
+ "loss": 2.5523,
135
+ "step": 105
136
+ },
137
+ {
138
+ "epoch": 0.04,
139
+ "learning_rate": 1.998933545073912e-05,
140
+ "loss": 2.5763,
141
+ "step": 110
142
+ },
143
+ {
144
+ "epoch": 0.05,
145
+ "learning_rate": 1.998834410473236e-05,
146
+ "loss": 2.6447,
147
+ "step": 115
148
+ },
149
+ {
150
+ "epoch": 0.05,
151
+ "learning_rate": 1.998730873389612e-05,
152
+ "loss": 2.5579,
153
+ "step": 120
154
+ },
155
+ {
156
+ "epoch": 0.05,
157
+ "learning_rate": 1.998622934279393e-05,
158
+ "loss": 2.5884,
159
+ "step": 125
160
+ },
161
+ {
162
+ "epoch": 0.05,
163
+ "learning_rate": 1.9985105936183327e-05,
164
+ "loss": 2.5051,
165
+ "step": 130
166
+ },
167
+ {
168
+ "epoch": 0.05,
169
+ "learning_rate": 1.9983938519015868e-05,
170
+ "loss": 2.6014,
171
+ "step": 135
172
+ },
173
+ {
174
+ "epoch": 0.06,
175
+ "learning_rate": 1.998272709643708e-05,
176
+ "loss": 2.5878,
177
+ "step": 140
178
+ },
179
+ {
180
+ "epoch": 0.06,
181
+ "learning_rate": 1.998147167378645e-05,
182
+ "loss": 2.6642,
183
+ "step": 145
184
+ },
185
+ {
186
+ "epoch": 0.06,
187
+ "learning_rate": 1.998017225659742e-05,
188
+ "loss": 2.5077,
189
+ "step": 150
190
+ },
191
+ {
192
+ "epoch": 0.06,
193
+ "learning_rate": 1.9978828850597312e-05,
194
+ "loss": 2.5921,
195
+ "step": 155
196
+ },
197
+ {
198
+ "epoch": 0.06,
199
+ "learning_rate": 1.9977441461707358e-05,
200
+ "loss": 2.5577,
201
+ "step": 160
202
+ },
203
+ {
204
+ "epoch": 0.07,
205
+ "learning_rate": 1.9976010096042634e-05,
206
+ "loss": 2.524,
207
+ "step": 165
208
+ },
209
+ {
210
+ "epoch": 0.07,
211
+ "learning_rate": 1.9974534759912068e-05,
212
+ "loss": 2.5708,
213
+ "step": 170
214
+ },
215
+ {
216
+ "epoch": 0.07,
217
+ "learning_rate": 1.997301545981837e-05,
218
+ "loss": 2.5578,
219
+ "step": 175
220
+ },
221
+ {
222
+ "epoch": 0.07,
223
+ "learning_rate": 1.9971452202458048e-05,
224
+ "loss": 2.6874,
225
+ "step": 180
226
+ },
227
+ {
228
+ "epoch": 0.07,
229
+ "learning_rate": 1.9969844994721338e-05,
230
+ "loss": 2.535,
231
+ "step": 185
232
+ },
233
+ {
234
+ "epoch": 0.08,
235
+ "learning_rate": 1.996819384369221e-05,
236
+ "loss": 2.5816,
237
+ "step": 190
238
+ },
239
+ {
240
+ "epoch": 0.08,
241
+ "learning_rate": 1.9966498756648305e-05,
242
+ "loss": 2.6225,
243
+ "step": 195
244
+ },
245
+ {
246
+ "epoch": 0.08,
247
+ "learning_rate": 1.9964759741060926e-05,
248
+ "loss": 2.5387,
249
+ "step": 200
250
+ },
251
+ {
252
+ "epoch": 0.08,
253
+ "learning_rate": 1.9962976804594993e-05,
254
+ "loss": 2.524,
255
+ "step": 205
256
+ },
257
+ {
258
+ "epoch": 0.08,
259
+ "learning_rate": 1.996114995510901e-05,
260
+ "loss": 2.488,
261
+ "step": 210
262
+ },
263
+ {
264
+ "epoch": 0.09,
265
+ "learning_rate": 1.9959279200655044e-05,
266
+ "loss": 2.5824,
267
+ "step": 215
268
+ },
269
+ {
270
+ "epoch": 0.09,
271
+ "learning_rate": 1.9957364549478663e-05,
272
+ "loss": 2.5828,
273
+ "step": 220
274
+ },
275
+ {
276
+ "epoch": 0.09,
277
+ "learning_rate": 1.9955406010018928e-05,
278
+ "loss": 2.5137,
279
+ "step": 225
280
+ },
281
+ {
282
+ "epoch": 0.09,
283
+ "learning_rate": 1.9953403590908334e-05,
284
+ "loss": 2.5539,
285
+ "step": 230
286
+ },
287
+ {
288
+ "epoch": 0.09,
289
+ "learning_rate": 1.995135730097278e-05,
290
+ "loss": 2.6099,
291
+ "step": 235
292
+ },
293
+ {
294
+ "epoch": 0.1,
295
+ "learning_rate": 1.994926714923155e-05,
296
+ "loss": 2.5309,
297
+ "step": 240
298
+ },
299
+ {
300
+ "epoch": 0.1,
301
+ "learning_rate": 1.9947133144897225e-05,
302
+ "loss": 2.5152,
303
+ "step": 245
304
+ },
305
+ {
306
+ "epoch": 0.1,
307
+ "learning_rate": 1.9944955297375693e-05,
308
+ "loss": 2.4738,
309
+ "step": 250
310
+ },
311
+ {
312
+ "epoch": 0.1,
313
+ "learning_rate": 1.9942733616266076e-05,
314
+ "loss": 2.5173,
315
+ "step": 255
316
+ },
317
+ {
318
+ "epoch": 0.1,
319
+ "learning_rate": 1.99404681113607e-05,
320
+ "loss": 2.578,
321
+ "step": 260
322
+ },
323
+ {
324
+ "epoch": 0.11,
325
+ "learning_rate": 1.993815879264506e-05,
326
+ "loss": 2.5089,
327
+ "step": 265
328
+ },
329
+ {
330
+ "epoch": 0.11,
331
+ "learning_rate": 1.9935805670297744e-05,
332
+ "loss": 2.5872,
333
+ "step": 270
334
+ },
335
+ {
336
+ "epoch": 0.11,
337
+ "learning_rate": 1.993340875469043e-05,
338
+ "loss": 2.5882,
339
+ "step": 275
340
+ },
341
+ {
342
+ "epoch": 0.11,
343
+ "learning_rate": 1.993096805638781e-05,
344
+ "loss": 2.5789,
345
+ "step": 280
346
+ },
347
+ {
348
+ "epoch": 0.11,
349
+ "learning_rate": 1.9928483586147553e-05,
350
+ "loss": 2.488,
351
+ "step": 285
352
+ },
353
+ {
354
+ "epoch": 0.12,
355
+ "learning_rate": 1.9925955354920265e-05,
356
+ "loss": 2.617,
357
+ "step": 290
358
+ },
359
+ {
360
+ "epoch": 0.12,
361
+ "learning_rate": 1.992338337384943e-05,
362
+ "loss": 2.5752,
363
+ "step": 295
364
+ },
365
+ {
366
+ "epoch": 0.12,
367
+ "learning_rate": 1.992076765427136e-05,
368
+ "loss": 2.5529,
369
+ "step": 300
370
+ },
371
+ {
372
+ "epoch": 0.12,
373
+ "learning_rate": 1.9918108207715156e-05,
374
+ "loss": 2.4942,
375
+ "step": 305
376
+ },
377
+ {
378
+ "epoch": 0.12,
379
+ "learning_rate": 1.991540504590265e-05,
380
+ "loss": 2.5456,
381
+ "step": 310
382
+ },
383
+ {
384
+ "epoch": 0.13,
385
+ "learning_rate": 1.991265818074835e-05,
386
+ "loss": 2.5957,
387
+ "step": 315
388
+ },
389
+ {
390
+ "epoch": 0.13,
391
+ "learning_rate": 1.99098676243594e-05,
392
+ "loss": 2.5699,
393
+ "step": 320
394
+ },
395
+ {
396
+ "epoch": 0.13,
397
+ "learning_rate": 1.9907033389035512e-05,
398
+ "loss": 2.5544,
399
+ "step": 325
400
+ },
401
+ {
402
+ "epoch": 0.13,
403
+ "learning_rate": 1.9904155487268912e-05,
404
+ "loss": 2.538,
405
+ "step": 330
406
+ },
407
+ {
408
+ "epoch": 0.13,
409
+ "learning_rate": 1.990123393174431e-05,
410
+ "loss": 2.6055,
411
+ "step": 335
412
+ },
413
+ {
414
+ "epoch": 0.14,
415
+ "learning_rate": 1.9898268735338807e-05,
416
+ "loss": 2.5846,
417
+ "step": 340
418
+ },
419
+ {
420
+ "epoch": 0.14,
421
+ "learning_rate": 1.9895259911121866e-05,
422
+ "loss": 2.5405,
423
+ "step": 345
424
+ },
425
+ {
426
+ "epoch": 0.14,
427
+ "learning_rate": 1.9892207472355243e-05,
428
+ "loss": 2.5162,
429
+ "step": 350
430
+ },
431
+ {
432
+ "epoch": 0.14,
433
+ "learning_rate": 1.988911143249292e-05,
434
+ "loss": 2.5484,
435
+ "step": 355
436
+ },
437
+ {
438
+ "epoch": 0.14,
439
+ "learning_rate": 1.9885971805181083e-05,
440
+ "loss": 2.671,
441
+ "step": 360
442
+ },
443
+ {
444
+ "epoch": 0.15,
445
+ "learning_rate": 1.9882788604258e-05,
446
+ "loss": 2.5696,
447
+ "step": 365
448
+ },
449
+ {
450
+ "epoch": 0.15,
451
+ "learning_rate": 1.987956184375402e-05,
452
+ "loss": 2.5021,
453
+ "step": 370
454
+ },
455
+ {
456
+ "epoch": 0.15,
457
+ "learning_rate": 1.9876291537891482e-05,
458
+ "loss": 2.5644,
459
+ "step": 375
460
+ },
461
+ {
462
+ "epoch": 0.15,
463
+ "learning_rate": 1.9872977701084645e-05,
464
+ "loss": 2.5386,
465
+ "step": 380
466
+ },
467
+ {
468
+ "epoch": 0.15,
469
+ "learning_rate": 1.9869620347939652e-05,
470
+ "loss": 2.554,
471
+ "step": 385
472
+ },
473
+ {
474
+ "epoch": 0.16,
475
+ "learning_rate": 1.9866219493254433e-05,
476
+ "loss": 2.4798,
477
+ "step": 390
478
+ },
479
+ {
480
+ "epoch": 0.16,
481
+ "learning_rate": 1.986277515201867e-05,
482
+ "loss": 2.538,
483
+ "step": 395
484
+ },
485
+ {
486
+ "epoch": 0.16,
487
+ "learning_rate": 1.9859287339413714e-05,
488
+ "loss": 2.5041,
489
+ "step": 400
490
+ },
491
+ {
492
+ "epoch": 0.16,
493
+ "learning_rate": 1.9855756070812514e-05,
494
+ "loss": 2.7067,
495
+ "step": 405
496
+ },
497
+ {
498
+ "epoch": 0.16,
499
+ "learning_rate": 1.9852181361779563e-05,
500
+ "loss": 2.5054,
501
+ "step": 410
502
+ },
503
+ {
504
+ "epoch": 0.17,
505
+ "learning_rate": 1.984856322807082e-05,
506
+ "loss": 2.4815,
507
+ "step": 415
508
+ },
509
+ {
510
+ "epoch": 0.17,
511
+ "learning_rate": 1.9844901685633648e-05,
512
+ "loss": 2.5885,
513
+ "step": 420
514
+ },
515
+ {
516
+ "epoch": 0.17,
517
+ "learning_rate": 1.9841196750606735e-05,
518
+ "loss": 2.521,
519
+ "step": 425
520
+ },
521
+ {
522
+ "epoch": 0.17,
523
+ "learning_rate": 1.9837448439320027e-05,
524
+ "loss": 2.4937,
525
+ "step": 430
526
+ },
527
+ {
528
+ "epoch": 0.17,
529
+ "learning_rate": 1.983365676829466e-05,
530
+ "loss": 2.5877,
531
+ "step": 435
532
+ },
533
+ {
534
+ "epoch": 0.18,
535
+ "learning_rate": 1.9829821754242885e-05,
536
+ "loss": 2.5761,
537
+ "step": 440
538
+ },
539
+ {
540
+ "epoch": 0.18,
541
+ "learning_rate": 1.9825943414067974e-05,
542
+ "loss": 2.4917,
543
+ "step": 445
544
+ },
545
+ {
546
+ "epoch": 0.18,
547
+ "learning_rate": 1.9822021764864194e-05,
548
+ "loss": 2.5434,
549
+ "step": 450
550
+ },
551
+ {
552
+ "epoch": 0.18,
553
+ "learning_rate": 1.9818056823916675e-05,
554
+ "loss": 2.5906,
555
+ "step": 455
556
+ },
557
+ {
558
+ "epoch": 0.18,
559
+ "learning_rate": 1.9814048608701374e-05,
560
+ "loss": 2.5508,
561
+ "step": 460
562
+ },
563
+ {
564
+ "epoch": 0.19,
565
+ "learning_rate": 1.980999713688499e-05,
566
+ "loss": 2.6005,
567
+ "step": 465
568
+ },
569
+ {
570
+ "epoch": 0.19,
571
+ "learning_rate": 1.980590242632486e-05,
572
+ "loss": 2.523,
573
+ "step": 470
574
+ },
575
+ {
576
+ "epoch": 0.19,
577
+ "learning_rate": 1.9801764495068923e-05,
578
+ "loss": 2.5453,
579
+ "step": 475
580
+ },
581
+ {
582
+ "epoch": 0.19,
583
+ "learning_rate": 1.979758336135561e-05,
584
+ "loss": 2.6426,
585
+ "step": 480
586
+ },
587
+ {
588
+ "epoch": 0.19,
589
+ "learning_rate": 1.9793359043613768e-05,
590
+ "loss": 2.5454,
591
+ "step": 485
592
+ },
593
+ {
594
+ "epoch": 0.2,
595
+ "learning_rate": 1.9789091560462587e-05,
596
+ "loss": 2.6071,
597
+ "step": 490
598
+ },
599
+ {
600
+ "epoch": 0.2,
601
+ "learning_rate": 1.9784780930711514e-05,
602
+ "loss": 2.5913,
603
+ "step": 495
604
+ },
605
+ {
606
+ "epoch": 0.2,
607
+ "learning_rate": 1.9780427173360165e-05,
608
+ "loss": 2.5082,
609
+ "step": 500
610
+ },
611
+ {
612
+ "epoch": 0.2,
613
+ "learning_rate": 1.977603030759825e-05,
614
+ "loss": 2.565,
615
+ "step": 505
616
+ },
617
+ {
618
+ "epoch": 0.2,
619
+ "learning_rate": 1.977159035280549e-05,
620
+ "loss": 2.6365,
621
+ "step": 510
622
+ },
623
+ {
624
+ "epoch": 0.21,
625
+ "learning_rate": 1.9767107328551515e-05,
626
+ "loss": 2.6872,
627
+ "step": 515
628
+ },
629
+ {
630
+ "epoch": 0.21,
631
+ "learning_rate": 1.9762581254595797e-05,
632
+ "loss": 2.6222,
633
+ "step": 520
634
+ },
635
+ {
636
+ "epoch": 0.21,
637
+ "learning_rate": 1.975801215088755e-05,
638
+ "loss": 2.5633,
639
+ "step": 525
640
+ },
641
+ {
642
+ "epoch": 0.21,
643
+ "learning_rate": 1.9753400037565653e-05,
644
+ "loss": 2.6579,
645
+ "step": 530
646
+ },
647
+ {
648
+ "epoch": 0.21,
649
+ "learning_rate": 1.9748744934958548e-05,
650
+ "loss": 2.5572,
651
+ "step": 535
652
+ },
653
+ {
654
+ "epoch": 0.22,
655
+ "learning_rate": 1.974404686358416e-05,
656
+ "loss": 2.5845,
657
+ "step": 540
658
+ },
659
+ {
660
+ "epoch": 0.22,
661
+ "learning_rate": 1.97393058441498e-05,
662
+ "loss": 2.6484,
663
+ "step": 545
664
+ },
665
+ {
666
+ "epoch": 0.22,
667
+ "learning_rate": 1.973452189755209e-05,
668
+ "loss": 2.5473,
669
+ "step": 550
670
+ },
671
+ {
672
+ "epoch": 0.22,
673
+ "learning_rate": 1.9729695044876847e-05,
674
+ "loss": 2.5995,
675
+ "step": 555
676
+ },
677
+ {
678
+ "epoch": 0.22,
679
+ "learning_rate": 1.9724825307399003e-05,
680
+ "loss": 2.5473,
681
+ "step": 560
682
+ },
683
+ {
684
+ "epoch": 0.23,
685
+ "learning_rate": 1.971991270658252e-05,
686
+ "loss": 2.5798,
687
+ "step": 565
688
+ },
689
+ {
690
+ "epoch": 0.23,
691
+ "learning_rate": 1.971495726408027e-05,
692
+ "loss": 2.655,
693
+ "step": 570
694
+ },
695
+ {
696
+ "epoch": 0.23,
697
+ "learning_rate": 1.970995900173397e-05,
698
+ "loss": 2.6904,
699
+ "step": 575
700
+ },
701
+ {
702
+ "epoch": 0.23,
703
+ "learning_rate": 1.9704917941574053e-05,
704
+ "loss": 2.6973,
705
+ "step": 580
706
+ },
707
+ {
708
+ "epoch": 0.23,
709
+ "learning_rate": 1.969983410581961e-05,
710
+ "loss": 2.6518,
711
+ "step": 585
712
+ },
713
+ {
714
+ "epoch": 0.24,
715
+ "learning_rate": 1.969470751687825e-05,
716
+ "loss": 2.5849,
717
+ "step": 590
718
+ },
719
+ {
720
+ "epoch": 0.24,
721
+ "learning_rate": 1.9689538197346035e-05,
722
+ "loss": 2.4715,
723
+ "step": 595
724
+ },
725
+ {
726
+ "epoch": 0.24,
727
+ "learning_rate": 1.9684326170007365e-05,
728
+ "loss": 2.7246,
729
+ "step": 600
730
+ },
731
+ {
732
+ "epoch": 0.24,
733
+ "learning_rate": 1.9679071457834874e-05,
734
+ "loss": 2.5482,
735
+ "step": 605
736
+ },
737
+ {
738
+ "epoch": 0.24,
739
+ "learning_rate": 1.967377408398934e-05,
740
+ "loss": 2.6084,
741
+ "step": 610
742
+ },
743
+ {
744
+ "epoch": 0.25,
745
+ "learning_rate": 1.966843407181958e-05,
746
+ "loss": 2.5144,
747
+ "step": 615
748
+ },
749
+ {
750
+ "epoch": 0.25,
751
+ "learning_rate": 1.9663051444862335e-05,
752
+ "loss": 2.7663,
753
+ "step": 620
754
+ },
755
+ {
756
+ "epoch": 0.25,
757
+ "learning_rate": 1.9657626226842187e-05,
758
+ "loss": 2.5697,
759
+ "step": 625
760
+ },
761
+ {
762
+ "epoch": 0.25,
763
+ "learning_rate": 1.9652158441671435e-05,
764
+ "loss": 2.6379,
765
+ "step": 630
766
+ },
767
+ {
768
+ "epoch": 0.25,
769
+ "learning_rate": 1.964664811345e-05,
770
+ "loss": 2.6784,
771
+ "step": 635
772
+ },
773
+ {
774
+ "epoch": 0.26,
775
+ "learning_rate": 1.964109526646532e-05,
776
+ "loss": 2.6936,
777
+ "step": 640
778
+ },
779
+ {
780
+ "epoch": 0.26,
781
+ "learning_rate": 1.963549992519223e-05,
782
+ "loss": 2.5672,
783
+ "step": 645
784
+ },
785
+ {
786
+ "epoch": 0.26,
787
+ "learning_rate": 1.962986211429288e-05,
788
+ "loss": 2.62,
789
+ "step": 650
790
+ },
791
+ {
792
+ "epoch": 0.26,
793
+ "learning_rate": 1.9624181858616593e-05,
794
+ "loss": 2.7293,
795
+ "step": 655
796
+ },
797
+ {
798
+ "epoch": 0.26,
799
+ "learning_rate": 1.9618459183199782e-05,
800
+ "loss": 2.6636,
801
+ "step": 660
802
+ },
803
+ {
804
+ "epoch": 0.27,
805
+ "learning_rate": 1.961269411326583e-05,
806
+ "loss": 2.5106,
807
+ "step": 665
808
+ },
809
+ {
810
+ "epoch": 0.27,
811
+ "learning_rate": 1.9606886674224977e-05,
812
+ "loss": 2.6878,
813
+ "step": 670
814
+ },
815
+ {
816
+ "epoch": 0.27,
817
+ "learning_rate": 1.960103689167421e-05,
818
+ "loss": 2.693,
819
+ "step": 675
820
+ },
821
+ {
822
+ "epoch": 0.27,
823
+ "learning_rate": 1.9595144791397142e-05,
824
+ "loss": 2.6562,
825
+ "step": 680
826
+ },
827
+ {
828
+ "epoch": 0.27,
829
+ "learning_rate": 1.9589210399363925e-05,
830
+ "loss": 2.6269,
831
+ "step": 685
832
+ },
833
+ {
834
+ "epoch": 0.28,
835
+ "learning_rate": 1.95832337417311e-05,
836
+ "loss": 2.6459,
837
+ "step": 690
838
+ },
839
+ {
840
+ "epoch": 0.28,
841
+ "learning_rate": 1.9577214844841515e-05,
842
+ "loss": 2.5765,
843
+ "step": 695
844
+ },
845
+ {
846
+ "epoch": 0.28,
847
+ "learning_rate": 1.957115373522417e-05,
848
+ "loss": 2.6113,
849
+ "step": 700
850
+ },
851
+ {
852
+ "epoch": 0.28,
853
+ "learning_rate": 1.956505043959414e-05,
854
+ "loss": 2.6641,
855
+ "step": 705
856
+ },
857
+ {
858
+ "epoch": 0.28,
859
+ "learning_rate": 1.955890498485244e-05,
860
+ "loss": 2.6493,
861
+ "step": 710
862
+ },
863
+ {
864
+ "epoch": 0.29,
865
+ "learning_rate": 1.9552717398085898e-05,
866
+ "loss": 2.6135,
867
+ "step": 715
868
+ },
869
+ {
870
+ "epoch": 0.29,
871
+ "learning_rate": 1.954648770656705e-05,
872
+ "loss": 2.6182,
873
+ "step": 720
874
+ },
875
+ {
876
+ "epoch": 0.29,
877
+ "learning_rate": 1.954021593775401e-05,
878
+ "loss": 2.6487,
879
+ "step": 725
880
+ },
881
+ {
882
+ "epoch": 0.29,
883
+ "learning_rate": 1.9533902119290352e-05,
884
+ "loss": 2.5927,
885
+ "step": 730
886
+ },
887
+ {
888
+ "epoch": 0.29,
889
+ "learning_rate": 1.952754627900499e-05,
890
+ "loss": 2.5825,
891
+ "step": 735
892
+ },
893
+ {
894
+ "epoch": 0.3,
895
+ "learning_rate": 1.9521148444912065e-05,
896
+ "loss": 2.7193,
897
+ "step": 740
898
+ },
899
+ {
900
+ "epoch": 0.3,
901
+ "learning_rate": 1.9514708645210793e-05,
902
+ "loss": 2.5802,
903
+ "step": 745
904
+ },
905
+ {
906
+ "epoch": 0.3,
907
+ "learning_rate": 1.9508226908285368e-05,
908
+ "loss": 2.6628,
909
+ "step": 750
910
+ },
911
+ {
912
+ "epoch": 0.3,
913
+ "learning_rate": 1.950170326270483e-05,
914
+ "loss": 2.5847,
915
+ "step": 755
916
+ },
917
+ {
918
+ "epoch": 0.3,
919
+ "learning_rate": 1.9495137737222925e-05,
920
+ "loss": 2.6594,
921
+ "step": 760
922
+ },
923
+ {
924
+ "epoch": 0.31,
925
+ "learning_rate": 1.9488530360778007e-05,
926
+ "loss": 2.6096,
927
+ "step": 765
928
+ },
929
+ {
930
+ "epoch": 0.31,
931
+ "learning_rate": 1.948188116249287e-05,
932
+ "loss": 2.6378,
933
+ "step": 770
934
+ },
935
+ {
936
+ "epoch": 0.31,
937
+ "learning_rate": 1.9475190171674675e-05,
938
+ "loss": 2.5984,
939
+ "step": 775
940
+ },
941
+ {
942
+ "epoch": 0.31,
943
+ "learning_rate": 1.9468457417814753e-05,
944
+ "loss": 2.6437,
945
+ "step": 780
946
+ },
947
+ {
948
+ "epoch": 0.31,
949
+ "learning_rate": 1.9461682930588534e-05,
950
+ "loss": 2.6522,
951
+ "step": 785
952
+ },
953
+ {
954
+ "epoch": 0.32,
955
+ "learning_rate": 1.9454866739855384e-05,
956
+ "loss": 2.6242,
957
+ "step": 790
958
+ },
959
+ {
960
+ "epoch": 0.32,
961
+ "learning_rate": 1.944800887565849e-05,
962
+ "loss": 2.5961,
963
+ "step": 795
964
+ },
965
+ {
966
+ "epoch": 0.32,
967
+ "learning_rate": 1.9441109368224704e-05,
968
+ "loss": 2.6365,
969
+ "step": 800
970
+ },
971
+ {
972
+ "epoch": 0.32,
973
+ "learning_rate": 1.9434168247964447e-05,
974
+ "loss": 2.5674,
975
+ "step": 805
976
+ },
977
+ {
978
+ "epoch": 0.32,
979
+ "learning_rate": 1.9427185545471537e-05,
980
+ "loss": 2.6369,
981
+ "step": 810
982
+ },
983
+ {
984
+ "epoch": 0.33,
985
+ "learning_rate": 1.9420161291523076e-05,
986
+ "loss": 2.5763,
987
+ "step": 815
988
+ },
989
+ {
990
+ "epoch": 0.33,
991
+ "learning_rate": 1.941309551707931e-05,
992
+ "loss": 2.5651,
993
+ "step": 820
994
+ },
995
+ {
996
+ "epoch": 0.33,
997
+ "learning_rate": 1.9405988253283492e-05,
998
+ "loss": 2.6223,
999
+ "step": 825
1000
+ },
1001
+ {
1002
+ "epoch": 0.33,
1003
+ "learning_rate": 1.939883953146174e-05,
1004
+ "loss": 2.6616,
1005
+ "step": 830
1006
+ },
1007
+ {
1008
+ "epoch": 0.33,
1009
+ "learning_rate": 1.939164938312291e-05,
1010
+ "loss": 2.6496,
1011
+ "step": 835
1012
+ },
1013
+ {
1014
+ "epoch": 0.34,
1015
+ "learning_rate": 1.9384417839958443e-05,
1016
+ "loss": 2.7161,
1017
+ "step": 840
1018
+ },
1019
+ {
1020
+ "epoch": 0.34,
1021
+ "learning_rate": 1.937714493384224e-05,
1022
+ "loss": 2.6047,
1023
+ "step": 845
1024
+ },
1025
+ {
1026
+ "epoch": 0.34,
1027
+ "learning_rate": 1.936983069683051e-05,
1028
+ "loss": 2.5978,
1029
+ "step": 850
1030
+ },
1031
+ {
1032
+ "epoch": 0.34,
1033
+ "learning_rate": 1.936247516116163e-05,
1034
+ "loss": 2.6331,
1035
+ "step": 855
1036
+ },
1037
+ {
1038
+ "epoch": 0.34,
1039
+ "learning_rate": 1.935507835925601e-05,
1040
+ "loss": 2.5679,
1041
+ "step": 860
1042
+ },
1043
+ {
1044
+ "epoch": 0.35,
1045
+ "learning_rate": 1.934764032371595e-05,
1046
+ "loss": 2.6647,
1047
+ "step": 865
1048
+ },
1049
+ {
1050
+ "epoch": 0.35,
1051
+ "learning_rate": 1.9340161087325483e-05,
1052
+ "loss": 2.6142,
1053
+ "step": 870
1054
+ },
1055
+ {
1056
+ "epoch": 0.35,
1057
+ "learning_rate": 1.9332640683050243e-05,
1058
+ "loss": 2.6181,
1059
+ "step": 875
1060
+ },
1061
+ {
1062
+ "epoch": 0.35,
1063
+ "learning_rate": 1.932507914403732e-05,
1064
+ "loss": 2.6105,
1065
+ "step": 880
1066
+ },
1067
+ {
1068
+ "epoch": 0.35,
1069
+ "learning_rate": 1.9317476503615108e-05,
1070
+ "loss": 2.6415,
1071
+ "step": 885
1072
+ },
1073
+ {
1074
+ "epoch": 0.36,
1075
+ "learning_rate": 1.9309832795293156e-05,
1076
+ "loss": 2.6736,
1077
+ "step": 890
1078
+ },
1079
+ {
1080
+ "epoch": 0.36,
1081
+ "learning_rate": 1.930214805276204e-05,
1082
+ "loss": 2.6507,
1083
+ "step": 895
1084
+ },
1085
+ {
1086
+ "epoch": 0.36,
1087
+ "learning_rate": 1.9294422309893177e-05,
1088
+ "loss": 2.6771,
1089
+ "step": 900
1090
+ },
1091
+ {
1092
+ "epoch": 0.36,
1093
+ "learning_rate": 1.9286655600738707e-05,
1094
+ "loss": 2.6763,
1095
+ "step": 905
1096
+ },
1097
+ {
1098
+ "epoch": 0.36,
1099
+ "learning_rate": 1.9278847959531348e-05,
1100
+ "loss": 2.7135,
1101
+ "step": 910
1102
+ },
1103
+ {
1104
+ "epoch": 0.37,
1105
+ "learning_rate": 1.927099942068421e-05,
1106
+ "loss": 2.6166,
1107
+ "step": 915
1108
+ },
1109
+ {
1110
+ "epoch": 0.37,
1111
+ "learning_rate": 1.9263110018790673e-05,
1112
+ "loss": 2.6238,
1113
+ "step": 920
1114
+ },
1115
+ {
1116
+ "epoch": 0.37,
1117
+ "learning_rate": 1.9255179788624233e-05,
1118
+ "loss": 2.6424,
1119
+ "step": 925
1120
+ },
1121
+ {
1122
+ "epoch": 0.37,
1123
+ "learning_rate": 1.9247208765138325e-05,
1124
+ "loss": 2.6023,
1125
+ "step": 930
1126
+ },
1127
+ {
1128
+ "epoch": 0.37,
1129
+ "learning_rate": 1.9239196983466204e-05,
1130
+ "loss": 2.58,
1131
+ "step": 935
1132
+ },
1133
+ {
1134
+ "epoch": 0.38,
1135
+ "learning_rate": 1.9231144478920756e-05,
1136
+ "loss": 2.6173,
1137
+ "step": 940
1138
+ },
1139
+ {
1140
+ "epoch": 0.38,
1141
+ "learning_rate": 1.9223051286994368e-05,
1142
+ "loss": 2.628,
1143
+ "step": 945
1144
+ },
1145
+ {
1146
+ "epoch": 0.38,
1147
+ "learning_rate": 1.9214917443358753e-05,
1148
+ "loss": 2.6868,
1149
+ "step": 950
1150
+ },
1151
+ {
1152
+ "epoch": 0.38,
1153
+ "learning_rate": 1.9206742983864813e-05,
1154
+ "loss": 2.6342,
1155
+ "step": 955
1156
+ },
1157
+ {
1158
+ "epoch": 0.38,
1159
+ "learning_rate": 1.9198527944542462e-05,
1160
+ "loss": 2.5934,
1161
+ "step": 960
1162
+ },
1163
+ {
1164
+ "epoch": 0.39,
1165
+ "learning_rate": 1.919027236160047e-05,
1166
+ "loss": 2.6354,
1167
+ "step": 965
1168
+ },
1169
+ {
1170
+ "epoch": 0.39,
1171
+ "learning_rate": 1.9181976271426315e-05,
1172
+ "loss": 2.5955,
1173
+ "step": 970
1174
+ },
1175
+ {
1176
+ "epoch": 0.39,
1177
+ "learning_rate": 1.9173639710586015e-05,
1178
+ "loss": 2.6134,
1179
+ "step": 975
1180
+ },
1181
+ {
1182
+ "epoch": 0.39,
1183
+ "learning_rate": 1.9165262715823966e-05,
1184
+ "loss": 2.7,
1185
+ "step": 980
1186
+ },
1187
+ {
1188
+ "epoch": 0.39,
1189
+ "learning_rate": 1.915684532406278e-05,
1190
+ "loss": 2.7197,
1191
+ "step": 985
1192
+ },
1193
+ {
1194
+ "epoch": 0.4,
1195
+ "learning_rate": 1.9148387572403123e-05,
1196
+ "loss": 2.4881,
1197
+ "step": 990
1198
+ },
1199
+ {
1200
+ "epoch": 0.4,
1201
+ "learning_rate": 1.913988949812356e-05,
1202
+ "loss": 2.6999,
1203
+ "step": 995
1204
+ },
1205
+ {
1206
+ "epoch": 0.4,
1207
+ "learning_rate": 1.9131351138680368e-05,
1208
+ "loss": 2.5981,
1209
+ "step": 1000
1210
+ },
1211
+ {
1212
+ "epoch": 0.4,
1213
+ "learning_rate": 1.9122772531707405e-05,
1214
+ "loss": 2.5515,
1215
+ "step": 1005
1216
+ },
1217
+ {
1218
+ "epoch": 0.4,
1219
+ "learning_rate": 1.9114153715015905e-05,
1220
+ "loss": 2.623,
1221
+ "step": 1010
1222
+ },
1223
+ {
1224
+ "epoch": 0.41,
1225
+ "learning_rate": 1.9105494726594344e-05,
1226
+ "loss": 2.6006,
1227
+ "step": 1015
1228
+ },
1229
+ {
1230
+ "epoch": 0.41,
1231
+ "learning_rate": 1.9096795604608258e-05,
1232
+ "loss": 2.5929,
1233
+ "step": 1020
1234
+ },
1235
+ {
1236
+ "epoch": 0.41,
1237
+ "learning_rate": 1.9088056387400074e-05,
1238
+ "loss": 2.5396,
1239
+ "step": 1025
1240
+ },
1241
+ {
1242
+ "epoch": 0.41,
1243
+ "learning_rate": 1.907927711348894e-05,
1244
+ "loss": 2.6428,
1245
+ "step": 1030
1246
+ },
1247
+ {
1248
+ "epoch": 0.41,
1249
+ "learning_rate": 1.9070457821570566e-05,
1250
+ "loss": 2.6296,
1251
+ "step": 1035
1252
+ },
1253
+ {
1254
+ "epoch": 0.42,
1255
+ "learning_rate": 1.9061598550517048e-05,
1256
+ "loss": 2.534,
1257
+ "step": 1040
1258
+ },
1259
+ {
1260
+ "epoch": 0.42,
1261
+ "learning_rate": 1.9052699339376685e-05,
1262
+ "loss": 2.6049,
1263
+ "step": 1045
1264
+ },
1265
+ {
1266
+ "epoch": 0.42,
1267
+ "learning_rate": 1.9043760227373817e-05,
1268
+ "loss": 2.5776,
1269
+ "step": 1050
1270
+ },
1271
+ {
1272
+ "epoch": 0.42,
1273
+ "learning_rate": 1.9034781253908664e-05,
1274
+ "loss": 2.7483,
1275
+ "step": 1055
1276
+ },
1277
+ {
1278
+ "epoch": 0.43,
1279
+ "learning_rate": 1.902576245855713e-05,
1280
+ "loss": 2.6973,
1281
+ "step": 1060
1282
+ },
1283
+ {
1284
+ "epoch": 0.43,
1285
+ "learning_rate": 1.9016703881070646e-05,
1286
+ "loss": 2.645,
1287
+ "step": 1065
1288
+ },
1289
+ {
1290
+ "epoch": 0.43,
1291
+ "learning_rate": 1.900760556137598e-05,
1292
+ "loss": 2.5775,
1293
+ "step": 1070
1294
+ },
1295
+ {
1296
+ "epoch": 0.43,
1297
+ "learning_rate": 1.899846753957507e-05,
1298
+ "loss": 2.6872,
1299
+ "step": 1075
1300
+ },
1301
+ {
1302
+ "epoch": 0.43,
1303
+ "learning_rate": 1.8989289855944846e-05,
1304
+ "loss": 2.5929,
1305
+ "step": 1080
1306
+ },
1307
+ {
1308
+ "epoch": 0.44,
1309
+ "learning_rate": 1.8980072550937058e-05,
1310
+ "loss": 2.5668,
1311
+ "step": 1085
1312
+ },
1313
+ {
1314
+ "epoch": 0.44,
1315
+ "learning_rate": 1.8970815665178086e-05,
1316
+ "loss": 2.6873,
1317
+ "step": 1090
1318
+ },
1319
+ {
1320
+ "epoch": 0.44,
1321
+ "learning_rate": 1.896151923946877e-05,
1322
+ "loss": 2.5679,
1323
+ "step": 1095
1324
+ },
1325
+ {
1326
+ "epoch": 0.44,
1327
+ "learning_rate": 1.8952183314784224e-05,
1328
+ "loss": 2.6734,
1329
+ "step": 1100
1330
+ }
1331
+ ],
1332
+ "logging_steps": 5,
1333
+ "max_steps": 7482,
1334
+ "num_input_tokens_seen": 0,
1335
+ "num_train_epochs": 3,
1336
+ "save_steps": 100,
1337
+ "total_flos": 1.465200501424128e+16,
1338
+ "train_batch_size": 4,
1339
+ "trial_name": null,
1340
+ "trial_params": null
1341
+ }
checkpoint-1100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c481a87016b183d9406558b8812ee05f00d60a3c721055833e3cdda34cf9bb26
+ size 4920
checkpoint-1100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1200/README.md ADDED
@@ -0,0 +1,204 @@
1
+ ---
2
+ library_name: peft
3
+ base_model: Qwen/Qwen1.5-0.5B-Chat
4
+ ---
5
+
6
+ # Model Card for Model ID
7
+
8
+ <!-- Provide a quick summary of what the model is/does. -->
9
+
10
+
11
+
12
+ ## Model Details
13
+
14
+ ### Model Description
15
+
16
+ <!-- Provide a longer summary of what this model is. -->
17
+
18
+
19
+
20
+ - **Developed by:** [More Information Needed]
21
+ - **Funded by [optional]:** [More Information Needed]
22
+ - **Shared by [optional]:** [More Information Needed]
23
+ - **Model type:** [More Information Needed]
24
+ - **Language(s) (NLP):** [More Information Needed]
25
+ - **License:** [More Information Needed]
26
+ - **Finetuned from model [optional]:** [More Information Needed]
27
+
28
+ ### Model Sources [optional]
29
+
30
+ <!-- Provide the basic links for the model. -->
31
+
32
+ - **Repository:** [More Information Needed]
33
+ - **Paper [optional]:** [More Information Needed]
34
+ - **Demo [optional]:** [More Information Needed]
35
+
36
+ ## Uses
37
+
38
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
39
+
40
+ ### Direct Use
41
+
42
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
43
+
44
+ [More Information Needed]
45
+
46
+ ### Downstream Use [optional]
47
+
48
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
49
+
50
+ [More Information Needed]
51
+
52
+ ### Out-of-Scope Use
53
+
54
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
55
+
56
+ [More Information Needed]
57
+
58
+ ## Bias, Risks, and Limitations
59
+
60
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
61
+
62
+ [More Information Needed]
63
+
64
+ ### Recommendations
65
+
66
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
67
+
68
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
69
+
70
+ ## How to Get Started with the Model
71
+
72
+ Use the code below to get started with the model.
73
+
74
+ [More Information Needed]
75
+
76
+ ## Training Details
77
+
78
+ ### Training Data
79
+
80
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
81
+
82
+ [More Information Needed]
83
+
84
+ ### Training Procedure
85
+
86
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
87
+
88
+ #### Preprocessing [optional]
89
+
90
+ [More Information Needed]
91
+
92
+
93
+ #### Training Hyperparameters
94
+
95
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
96
+
97
+ #### Speeds, Sizes, Times [optional]
98
+
99
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
100
+
101
+ [More Information Needed]
102
+
103
+ ## Evaluation
104
+
105
+ <!-- This section describes the evaluation protocols and provides the results. -->
106
+
107
+ ### Testing Data, Factors & Metrics
108
+
109
+ #### Testing Data
110
+
111
+ <!-- This should link to a Dataset Card if possible. -->
112
+
113
+ [More Information Needed]
114
+
115
+ #### Factors
116
+
117
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
118
+
119
+ [More Information Needed]
120
+
121
+ #### Metrics
122
+
123
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
124
+
125
+ [More Information Needed]
126
+
127
+ ### Results
128
+
129
+ [More Information Needed]
130
+
131
+ #### Summary
132
+
133
+
134
+
135
+ ## Model Examination [optional]
136
+
137
+ <!-- Relevant interpretability work for the model goes here -->
138
+
139
+ [More Information Needed]
140
+
141
+ ## Environmental Impact
142
+
143
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
144
+
145
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
146
+
147
+ - **Hardware Type:** [More Information Needed]
148
+ - **Hours used:** [More Information Needed]
149
+ - **Cloud Provider:** [More Information Needed]
150
+ - **Compute Region:** [More Information Needed]
151
+ - **Carbon Emitted:** [More Information Needed]
152
+
153
+ ## Technical Specifications [optional]
154
+
155
+ ### Model Architecture and Objective
156
+
157
+ [More Information Needed]
158
+
159
+ ### Compute Infrastructure
160
+
161
+ [More Information Needed]
162
+
163
+ #### Hardware
164
+
165
+ [More Information Needed]
166
+
167
+ #### Software
168
+
169
+ [More Information Needed]
170
+
171
+ ## Citation [optional]
172
+
173
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
174
+
175
+ **BibTeX:**
176
+
177
+ [More Information Needed]
178
+
179
+ **APA:**
180
+
181
+ [More Information Needed]
182
+
183
+ ## Glossary [optional]
184
+
185
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
186
+
187
+ [More Information Needed]
188
+
189
+ ## More Information [optional]
190
+
191
+ [More Information Needed]
192
+
193
+ ## Model Card Authors [optional]
194
+
195
+ [More Information Needed]
196
+
197
+ ## Model Card Contact
198
+
199
+ [More Information Needed]
200
+
201
+
202
+ ### Framework versions
203
+
204
+ - PEFT 0.8.2
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_rslora": false
+ }
checkpoint-1200/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8ac54222bde53351cbf4c771875eb4a60e28dff8500e39019ea2105dc4783dc
+ size 3158328
checkpoint-1200/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-1200/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:182e06d4a3ae15651a75020a84259190d31e5a818c7c791ba3d5143e661ba2ea
+ size 6372346