Training in progress, step 200
- .gitattributes +1 -0
- added_tokens.json +24 -0
- config.json +28 -0
- merges.txt +0 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +346 -0
- special_tokens_map.json +31 -0
- tokenizer.json +3 -0
- tokenizer_config.json +209 -0
- trainer_log.jsonl +201 -0
- training_args.bin +3 -0
- vocab.json +0 -0

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text

added_tokens.json ADDED
@@ -0,0 +1,24 @@
{
  "</tool_call>": 151658,
  "<tool_call>": 151657,
  "<|box_end|>": 151649,
  "<|box_start|>": 151648,
  "<|endoftext|>": 151643,
  "<|file_sep|>": 151664,
  "<|fim_middle|>": 151660,
  "<|fim_pad|>": 151662,
  "<|fim_prefix|>": 151659,
  "<|fim_suffix|>": 151661,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644,
  "<|image_pad|>": 151655,
  "<|object_ref_end|>": 151647,
  "<|object_ref_start|>": 151646,
  "<|quad_end|>": 151651,
  "<|quad_start|>": 151650,
  "<|repo_name|>": 151663,
  "<|video_pad|>": 151656,
  "<|vision_end|>": 151653,
  "<|vision_pad|>": 151654,
  "<|vision_start|>": 151652
}

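As a quick sanity check (not part of the commit), the IDs above can be verified against the saved tokenizer; the local path ./checkpoint-200 is hypothetical:

# Sketch: cross-check added_tokens.json against the tokenizer vocabulary.
import json
from transformers import AutoTokenizer

ckpt = "./checkpoint-200"  # hypothetical local copy of this commit
tok = AutoTokenizer.from_pretrained(ckpt)
with open(f"{ckpt}/added_tokens.json") as f:
    added = json.load(f)
for token, token_id in added.items():
    assert tok.convert_tokens_to_ids(token) == token_id  # e.g. "<|im_end|>" -> 151645
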
config.json ADDED
@@ -0,0 +1,28 @@
{
  "architectures": [
    "Qwen2ForCausalLM"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": 131072,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.51.2",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 152064
}

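The config describes a 28-layer Qwen2 decoder with grouped-query attention (28 query heads, 4 KV heads) and a 152,064-token vocabulary. A minimal sketch of inspecting it with transformers; the local path is hypothetical:

# Sketch: load and inspect config.json.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("./checkpoint-200")  # hypothetical local copy
print(cfg.model_type, cfg.hidden_size, cfg.num_hidden_layers)  # qwen2 3584 28
print(cfg.num_attention_heads, cfg.num_key_value_heads)        # 28 4 (GQA)
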
merges.txt ADDED
The diff for this file is too large to render. See raw diff.

model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1cec48501c68438fd96defd3a173a0332ad8e850270f46dfb15c18df0376462
size 4877660776

model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c379b2fc5dd2388239443d744364768c78238db82c0f450ef332d4ff07243a9
size 4932751008

model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a966b33baec3b3b71eaa8510a97f7a890252cdb75bd0f5608353e4946d5b9b7
size 4330865200

model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e4f4e5075a2d4fada288e085f1f0e239123ffd0a38c5fc8903fded96086e9325
size 1089994880

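Each shard is stored as a Git LFS pointer recording the spec version, a sha256 oid, and the byte size. A small stdlib-only sketch for checking a downloaded shard against its pointer (values taken from the first pointer above):

# Sketch: verify a downloaded shard against its LFS pointer (sha256 oid + size).
import hashlib, os

def verify(path, expected_sha256, expected_size):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256 and os.path.getsize(path) == expected_size

ok = verify("model-00001-of-00004.safetensors",
            "a1cec48501c68438fd96defd3a173a0332ad8e850270f46dfb15c18df0376462",
            4877660776)
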
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
{
  "metadata": {
    "total_size": 15231233024
  },
  "weight_map": {
    "lm_head.weight": "model-00004-of-00004.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
    ... (The remaining weight_map entries, for layers 1-27 and model.norm.weight, repeat the same twelve keys per layer: layers 1-7 map to model-00001; layer 8 is split between model-00001 (self_attn) and model-00002 (layernorms and MLP); layers 9-17 map to model-00002; layer 18 is split between model-00002 (self_attn, mlp.gate_proj, mlp.up_proj) and model-00003 (layernorms, mlp.down_proj); layers 19-27 and model.norm.weight map to model-00003. See the raw file for the full listing.) ...
  }
}

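The weight_map is what lets a loader fetch a single tensor without touching the other shards. A hedged sketch of resolving one entry with the safetensors library; the local path is hypothetical:

# Sketch: resolve a tensor to its shard via model.safetensors.index.json.
import json, os
from safetensors import safe_open

ckpt = "./checkpoint-200"  # hypothetical local copy of this commit
with open(os.path.join(ckpt, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00004.safetensors"
with safe_open(os.path.join(ckpt, shard), framework="pt") as f:
    tensor = f.get_tensor(name)    # bfloat16 weight read from just that shard
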
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}

tokenizer.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
size 11421896

tokenizer_config.json ADDED
@@ -0,0 +1,209 @@
{
  "add_bos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
    ... (Entries "151644" through "151656" — <|im_start|>, <|im_end|>, <|object_ref_start|>, <|object_ref_end|>, <|box_start|>, <|box_end|>, <|quad_start|>, <|quad_end|>, <|vision_start|>, <|vision_end|>, <|vision_pad|>, <|image_pad|>, <|video_pad|> — repeat the same five flags with "special": true; entries "151657" through "151664" — <tool_call>, </tool_call>, <|fim_prefix|>, <|fim_middle|>, <|fim_suffix|>, <|fim_pad|>, <|repo_name|>, <|file_sep|> — use "special": false.) ...
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>",
    "<|object_ref_start|>",
    "<|object_ref_end|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|quad_start|>",
    "<|quad_end|>",
    "<|vision_start|>",
    "<|vision_end|>",
    "<|vision_pad|>",
    "<|image_pad|>",
    "<|video_pad|>"
  ],
  "bos_token": null,
"chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "errors": "replace",
  "extra_special_tokens": {},
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}

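The chat_template above is a Qwen-style ChatML template with tool-calling support. A sketch of rendering a conversation with it; the local path is hypothetical:

# Sketch: render a prompt with the chat_template from tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-200")  # hypothetical local copy
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# prompt wraps each turn in <|im_start|>role ... <|im_end|> and ends with "<|im_start|>assistant\n"
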
trainer_log.jsonl ADDED
@@ -0,0 +1,201 @@
{"current_steps": 1, "total_steps": 519, "loss": 1.7935, "lr": 0.0, "epoch": 0.0057553956834532375, "percentage": 0.19, "elapsed_time": "0:01:49", "remaining_time": "15:41:23"}
{"current_steps": 2, "total_steps": 519, "loss": 2.0296, "lr": 1.9230769230769234e-07, "epoch": 0.011510791366906475, "percentage": 0.39, "elapsed_time": "0:03:18", "remaining_time": "14:15:25"}
{"current_steps": 3, "total_steps": 519, "loss": 2.0072, "lr": 3.846153846153847e-07, "epoch": 0.017266187050359712, "percentage": 0.58, "elapsed_time": "0:05:06", "remaining_time": "14:38:53"}
{"current_steps": 4, "total_steps": 519, "loss": 1.9133, "lr": 5.76923076923077e-07, "epoch": 0.02302158273381295, "percentage": 0.77, "elapsed_time": "0:06:52", "remaining_time": "14:45:37"}
{"current_steps": 5, "total_steps": 519, "loss": 1.9452, "lr": 7.692307692307694e-07, "epoch": 0.02877697841726619, "percentage": 0.96, "elapsed_time": "0:08:42", "remaining_time": "14:55:03"}
... (Entries 6-73 continue in the same format, one JSON object per logged step: the learning rate warms up linearly to 1e-05 at step 53 and then begins to decay, while the loss falls from roughly 1.8-2.0 over the first few steps to roughly 0.9-1.0 by step 73. See the raw file for the full log.) ...
|
74 |
+
{"current_steps": 74, "total_steps": 519, "loss": 0.9518, "lr": 9.950189353467735e-06, "epoch": 0.42589928057553955, "percentage": 14.26, "elapsed_time": "2:17:25", "remaining_time": "13:46:27"}
|
75 |
+
{"current_steps": 75, "total_steps": 519, "loss": 0.9229, "lr": 9.945341400501838e-06, "epoch": 0.4316546762589928, "percentage": 14.45, "elapsed_time": "2:18:53", "remaining_time": "13:42:14"}
|
76 |
+
{"current_steps": 76, "total_steps": 519, "loss": 1.0027, "lr": 9.940269647256319e-06, "epoch": 0.437410071942446, "percentage": 14.64, "elapsed_time": "2:21:14", "remaining_time": "13:43:15"}
|
77 |
+
{"current_steps": 77, "total_steps": 519, "loss": 1.1001, "lr": 9.934974323252195e-06, "epoch": 0.44316546762589926, "percentage": 14.84, "elapsed_time": "2:23:00", "remaining_time": "13:40:51"}
|
78 |
+
{"current_steps": 78, "total_steps": 519, "loss": 0.9906, "lr": 9.929455668128129e-06, "epoch": 0.4489208633093525, "percentage": 15.03, "elapsed_time": "2:24:49", "remaining_time": "13:38:49"}
|
79 |
+
{"current_steps": 79, "total_steps": 519, "loss": 0.879, "lr": 9.923713931629582e-06, "epoch": 0.4546762589928058, "percentage": 15.22, "elapsed_time": "2:26:10", "remaining_time": "13:34:08"}
|
80 |
+
{"current_steps": 80, "total_steps": 519, "loss": 0.8488, "lr": 9.917749373597506e-06, "epoch": 0.460431654676259, "percentage": 15.41, "elapsed_time": "2:27:56", "remaining_time": "13:31:50"}
|
81 |
+
{"current_steps": 81, "total_steps": 519, "loss": 0.9084, "lr": 9.911562263956593e-06, "epoch": 0.46618705035971225, "percentage": 15.61, "elapsed_time": "2:30:11", "remaining_time": "13:32:09"}
|
82 |
+
{"current_steps": 82, "total_steps": 519, "loss": 0.9151, "lr": 9.90515288270306e-06, "epoch": 0.4719424460431655, "percentage": 15.8, "elapsed_time": "2:32:11", "remaining_time": "13:31:03"}
|
83 |
+
{"current_steps": 83, "total_steps": 519, "loss": 0.9084, "lr": 9.898521519891968e-06, "epoch": 0.4776978417266187, "percentage": 15.99, "elapsed_time": "2:33:42", "remaining_time": "13:27:25"}
|
84 |
+
{"current_steps": 84, "total_steps": 519, "loss": 0.9244, "lr": 9.891668475624106e-06, "epoch": 0.48345323741007196, "percentage": 16.18, "elapsed_time": "2:36:03", "remaining_time": "13:28:07"}
|
85 |
+
{"current_steps": 85, "total_steps": 519, "loss": 0.8863, "lr": 9.884594060032407e-06, "epoch": 0.4892086330935252, "percentage": 16.38, "elapsed_time": "2:37:21", "remaining_time": "13:23:25"}
|
86 |
+
{"current_steps": 86, "total_steps": 519, "loss": 0.8856, "lr": 9.877298593267906e-06, "epoch": 0.4949640287769784, "percentage": 16.57, "elapsed_time": "2:39:26", "remaining_time": "13:22:44"}
|
87 |
+
{"current_steps": 87, "total_steps": 519, "loss": 0.9544, "lr": 9.869782405485267e-06, "epoch": 0.5007194244604316, "percentage": 16.76, "elapsed_time": "2:41:16", "remaining_time": "13:20:50"}
|
88 |
+
{"current_steps": 88, "total_steps": 519, "loss": 0.9075, "lr": 9.862045836827821e-06, "epoch": 0.5064748201438849, "percentage": 16.96, "elapsed_time": "2:42:44", "remaining_time": "13:17:05"}
|
89 |
+
{"current_steps": 89, "total_steps": 519, "loss": 0.8764, "lr": 9.854089237412194e-06, "epoch": 0.5122302158273381, "percentage": 17.15, "elapsed_time": "2:44:03", "remaining_time": "13:12:40"}
|
90 |
+
{"current_steps": 90, "total_steps": 519, "loss": 0.9502, "lr": 9.84591296731245e-06, "epoch": 0.5179856115107914, "percentage": 17.34, "elapsed_time": "2:45:58", "remaining_time": "13:11:11"}
|
91 |
+
{"current_steps": 91, "total_steps": 519, "loss": 1.076, "lr": 9.837517396543799e-06, "epoch": 0.5237410071942447, "percentage": 17.53, "elapsed_time": "2:48:05", "remaining_time": "13:10:35"}
|
92 |
+
{"current_steps": 92, "total_steps": 519, "loss": 0.9747, "lr": 9.82890290504585e-06, "epoch": 0.5294964028776978, "percentage": 17.73, "elapsed_time": "2:50:11", "remaining_time": "13:09:55"}
|
93 |
+
{"current_steps": 93, "total_steps": 519, "loss": 0.9277, "lr": 9.82006988266542e-06, "epoch": 0.5352517985611511, "percentage": 17.92, "elapsed_time": "2:52:23", "remaining_time": "13:09:37"}
|
94 |
+
{"current_steps": 94, "total_steps": 519, "loss": 0.8989, "lr": 9.811018729138892e-06, "epoch": 0.5410071942446043, "percentage": 18.11, "elapsed_time": "2:53:51", "remaining_time": "13:06:02"}
|
95 |
+
{"current_steps": 95, "total_steps": 519, "loss": 0.879, "lr": 9.801749854074122e-06, "epoch": 0.5467625899280576, "percentage": 18.3, "elapsed_time": "2:55:27", "remaining_time": "13:03:05"}
|
96 |
+
{"current_steps": 96, "total_steps": 519, "loss": 0.8718, "lr": 9.792263676931906e-06, "epoch": 0.5525179856115108, "percentage": 18.5, "elapsed_time": "2:57:17", "remaining_time": "13:01:13"}
|
97 |
+
{"current_steps": 97, "total_steps": 519, "loss": 0.8544, "lr": 9.78256062700699e-06, "epoch": 0.5582733812949641, "percentage": 18.69, "elapsed_time": "2:59:10", "remaining_time": "12:59:29"}
|
98 |
+
{"current_steps": 98, "total_steps": 519, "loss": 0.8972, "lr": 9.772641143408652e-06, "epoch": 0.5640287769784172, "percentage": 18.88, "elapsed_time": "3:01:16", "remaining_time": "12:58:45"}
|
99 |
+
{"current_steps": 99, "total_steps": 519, "loss": 0.9948, "lr": 9.762505675040826e-06, "epoch": 0.5697841726618705, "percentage": 19.08, "elapsed_time": "3:03:51", "remaining_time": "13:00:00"}
|
100 |
+
{"current_steps": 100, "total_steps": 519, "loss": 0.8942, "lr": 9.752154680581783e-06, "epoch": 0.5755395683453237, "percentage": 19.27, "elapsed_time": "3:05:31", "remaining_time": "12:57:20"}
|
101 |
+
{"current_steps": 101, "total_steps": 519, "loss": 0.8772, "lr": 9.741588628463384e-06, "epoch": 0.581294964028777, "percentage": 19.46, "elapsed_time": "3:07:57", "remaining_time": "12:57:52"}
|
102 |
+
{"current_steps": 102, "total_steps": 519, "loss": 0.9314, "lr": 9.730807996849864e-06, "epoch": 0.5870503597122302, "percentage": 19.65, "elapsed_time": "3:09:47", "remaining_time": "12:55:55"}
|
103 |
+
{"current_steps": 103, "total_steps": 519, "loss": 0.9802, "lr": 9.719813273616216e-06, "epoch": 0.5928057553956835, "percentage": 19.85, "elapsed_time": "3:11:58", "remaining_time": "12:55:21"}
|
104 |
+
{"current_steps": 104, "total_steps": 519, "loss": 0.927, "lr": 9.70860495632609e-06, "epoch": 0.5985611510791367, "percentage": 20.04, "elapsed_time": "3:13:37", "remaining_time": "12:52:38"}
|
105 |
+
{"current_steps": 105, "total_steps": 519, "loss": 0.924, "lr": 9.697183552209289e-06, "epoch": 0.60431654676259, "percentage": 20.23, "elapsed_time": "3:15:26", "remaining_time": "12:50:37"}
|
106 |
+
{"current_steps": 106, "total_steps": 519, "loss": 0.8945, "lr": 9.68554957813881e-06, "epoch": 0.6100719424460431, "percentage": 20.42, "elapsed_time": "3:16:59", "remaining_time": "12:47:31"}
|
107 |
+
{"current_steps": 107, "total_steps": 519, "loss": 0.8816, "lr": 9.673703560607459e-06, "epoch": 0.6158273381294964, "percentage": 20.62, "elapsed_time": "3:18:24", "remaining_time": "12:43:56"}
|
108 |
+
{"current_steps": 108, "total_steps": 519, "loss": 0.9575, "lr": 9.661646035704009e-06, "epoch": 0.6215827338129496, "percentage": 20.81, "elapsed_time": "3:20:21", "remaining_time": "12:42:26"}
|
109 |
+
{"current_steps": 109, "total_steps": 519, "loss": 0.8911, "lr": 9.649377549088962e-06, "epoch": 0.6273381294964029, "percentage": 21.0, "elapsed_time": "3:22:07", "remaining_time": "12:40:18"}
|
110 |
+
{"current_steps": 110, "total_steps": 519, "loss": 0.8356, "lr": 9.636898655969837e-06, "epoch": 0.6330935251798561, "percentage": 21.19, "elapsed_time": "3:24:12", "remaining_time": "12:39:16"}
|
111 |
+
{"current_steps": 111, "total_steps": 519, "loss": 0.9424, "lr": 9.62420992107605e-06, "epoch": 0.6388489208633094, "percentage": 21.39, "elapsed_time": "3:25:46", "remaining_time": "12:36:19"}
|
112 |
+
{"current_steps": 112, "total_steps": 519, "loss": 0.8761, "lr": 9.61131191863336e-06, "epoch": 0.6446043165467625, "percentage": 21.58, "elapsed_time": "3:27:33", "remaining_time": "12:34:16"}
|
113 |
+
{"current_steps": 113, "total_steps": 519, "loss": 1.0254, "lr": 9.598205232337882e-06, "epoch": 0.6503597122302158, "percentage": 21.77, "elapsed_time": "3:29:36", "remaining_time": "12:33:05"}
|
114 |
+
{"current_steps": 114, "total_steps": 519, "loss": 0.8775, "lr": 9.584890455329667e-06, "epoch": 0.6561151079136691, "percentage": 21.97, "elapsed_time": "3:31:28", "remaining_time": "12:31:18"}
|
115 |
+
{"current_steps": 115, "total_steps": 519, "loss": 0.9254, "lr": 9.571368190165865e-06, "epoch": 0.6618705035971223, "percentage": 22.16, "elapsed_time": "3:33:24", "remaining_time": "12:29:41"}
|
116 |
+
{"current_steps": 116, "total_steps": 519, "loss": 0.9581, "lr": 9.557639048793453e-06, "epoch": 0.6676258992805756, "percentage": 22.35, "elapsed_time": "3:34:51", "remaining_time": "12:26:26"}
|
117 |
+
{"current_steps": 117, "total_steps": 519, "loss": 0.8931, "lr": 9.543703652521543e-06, "epoch": 0.6733812949640288, "percentage": 22.54, "elapsed_time": "3:36:48", "remaining_time": "12:24:56"}
|
118 |
+
{"current_steps": 118, "total_steps": 519, "loss": 0.856, "lr": 9.52956263199327e-06, "epoch": 0.679136690647482, "percentage": 22.74, "elapsed_time": "3:38:42", "remaining_time": "12:23:12"}
|
119 |
+
{"current_steps": 119, "total_steps": 519, "loss": 0.9335, "lr": 9.515216627157238e-06, "epoch": 0.6848920863309352, "percentage": 22.93, "elapsed_time": "3:40:28", "remaining_time": "12:21:05"}
|
120 |
+
{"current_steps": 120, "total_steps": 519, "loss": 0.7234, "lr": 9.500666287238573e-06, "epoch": 0.6906474820143885, "percentage": 23.12, "elapsed_time": "3:42:21", "remaining_time": "12:19:18"}
|
121 |
+
{"current_steps": 121, "total_steps": 519, "loss": 0.8393, "lr": 9.485912270709542e-06, "epoch": 0.6964028776978417, "percentage": 23.31, "elapsed_time": "3:43:55", "remaining_time": "12:16:33"}
|
122 |
+
{"current_steps": 122, "total_steps": 519, "loss": 0.9068, "lr": 9.470955245259742e-06, "epoch": 0.702158273381295, "percentage": 23.51, "elapsed_time": "3:46:03", "remaining_time": "12:15:36"}
|
123 |
+
{"current_steps": 123, "total_steps": 519, "loss": 0.9699, "lr": 9.455795887765896e-06, "epoch": 0.7079136690647482, "percentage": 23.7, "elapsed_time": "3:47:31", "remaining_time": "12:12:31"}
|
124 |
+
{"current_steps": 124, "total_steps": 519, "loss": 0.9426, "lr": 9.440434884261216e-06, "epoch": 0.7136690647482015, "percentage": 23.89, "elapsed_time": "3:49:45", "remaining_time": "12:11:54"}
|
125 |
+
{"current_steps": 125, "total_steps": 519, "loss": 0.8158, "lr": 9.424872929904359e-06, "epoch": 0.7194244604316546, "percentage": 24.08, "elapsed_time": "3:51:20", "remaining_time": "12:09:12"}
|
126 |
+
{"current_steps": 126, "total_steps": 519, "loss": 0.8735, "lr": 9.409110728947964e-06, "epoch": 0.7251798561151079, "percentage": 24.28, "elapsed_time": "3:53:10", "remaining_time": "12:07:18"}
|
127 |
+
{"current_steps": 127, "total_steps": 519, "loss": 0.8947, "lr": 9.393148994706785e-06, "epoch": 0.7309352517985611, "percentage": 24.47, "elapsed_time": "3:54:30", "remaining_time": "12:03:50"}
|
128 |
+
{"current_steps": 128, "total_steps": 519, "loss": 0.9162, "lr": 9.376988449525405e-06, "epoch": 0.7366906474820144, "percentage": 24.66, "elapsed_time": "3:56:26", "remaining_time": "12:02:16"}
|
129 |
+
{"current_steps": 129, "total_steps": 519, "loss": 0.8933, "lr": 9.360629824745558e-06, "epoch": 0.7424460431654676, "percentage": 24.86, "elapsed_time": "3:58:05", "remaining_time": "11:59:48"}
|
130 |
+
{"current_steps": 130, "total_steps": 519, "loss": 0.8792, "lr": 9.344073860673016e-06, "epoch": 0.7482014388489209, "percentage": 25.05, "elapsed_time": "4:00:39", "remaining_time": "12:00:08"}
|
131 |
+
{"current_steps": 131, "total_steps": 519, "loss": 0.8591, "lr": 9.327321306544097e-06, "epoch": 0.753956834532374, "percentage": 25.24, "elapsed_time": "4:02:28", "remaining_time": "11:58:11"}
|
132 |
+
{"current_steps": 132, "total_steps": 519, "loss": 0.9339, "lr": 9.310372920491761e-06, "epoch": 0.7597122302158273, "percentage": 25.43, "elapsed_time": "4:04:59", "remaining_time": "11:58:16"}
|
133 |
+
{"current_steps": 133, "total_steps": 519, "loss": 0.8689, "lr": 9.293229469511293e-06, "epoch": 0.7654676258992805, "percentage": 25.63, "elapsed_time": "4:06:55", "remaining_time": "11:56:38"}
|
134 |
+
{"current_steps": 134, "total_steps": 519, "loss": 0.8855, "lr": 9.275891729425595e-06, "epoch": 0.7712230215827338, "percentage": 25.82, "elapsed_time": "4:08:39", "remaining_time": "11:54:25"}
|
135 |
+
{"current_steps": 135, "total_steps": 519, "loss": 0.8467, "lr": 9.25836048485008e-06, "epoch": 0.7769784172661871, "percentage": 26.01, "elapsed_time": "4:10:31", "remaining_time": "11:52:34"}
|
136 |
+
{"current_steps": 136, "total_steps": 519, "loss": 0.9453, "lr": 9.240636529157158e-06, "epoch": 0.7827338129496403, "percentage": 26.2, "elapsed_time": "4:12:50", "remaining_time": "11:52:02"}
|
137 |
+
{"current_steps": 137, "total_steps": 519, "loss": 0.8845, "lr": 9.22272066444034e-06, "epoch": 0.7884892086330936, "percentage": 26.4, "elapsed_time": "4:14:56", "remaining_time": "11:50:52"}
|
138 |
+
{"current_steps": 138, "total_steps": 519, "loss": 0.9174, "lr": 9.204613701477935e-06, "epoch": 0.7942446043165468, "percentage": 26.59, "elapsed_time": "4:16:28", "remaining_time": "11:48:06"}
|
139 |
+
{"current_steps": 139, "total_steps": 519, "loss": 0.94, "lr": 9.186316459696359e-06, "epoch": 0.8, "percentage": 26.78, "elapsed_time": "4:18:10", "remaining_time": "11:45:48"}
|
140 |
+
{"current_steps": 140, "total_steps": 519, "loss": 0.8102, "lr": 9.167829767133047e-06, "epoch": 0.8057553956834532, "percentage": 26.97, "elapsed_time": "4:20:02", "remaining_time": "11:43:57"}
|
141 |
+
{"current_steps": 141, "total_steps": 519, "loss": 0.9431, "lr": 9.149154460398993e-06, "epoch": 0.8115107913669065, "percentage": 27.17, "elapsed_time": "4:21:46", "remaining_time": "11:41:45"}
|
142 |
+
{"current_steps": 142, "total_steps": 519, "loss": 0.9047, "lr": 9.130291384640873e-06, "epoch": 0.8172661870503597, "percentage": 27.36, "elapsed_time": "4:23:45", "remaining_time": "11:40:15"}
|
143 |
+
{"current_steps": 143, "total_steps": 519, "loss": 0.8925, "lr": 9.111241393502814e-06, "epoch": 0.823021582733813, "percentage": 27.55, "elapsed_time": "4:26:28", "remaining_time": "11:40:40"}
|
144 |
+
{"current_steps": 144, "total_steps": 519, "loss": 0.8559, "lr": 9.092005349087754e-06, "epoch": 0.8287769784172662, "percentage": 27.75, "elapsed_time": "4:28:47", "remaining_time": "11:39:59"}
|
145 |
+
{"current_steps": 145, "total_steps": 519, "loss": 1.0086, "lr": 9.072584121918426e-06, "epoch": 0.8345323741007195, "percentage": 27.94, "elapsed_time": "4:30:22", "remaining_time": "11:37:23"}
|
146 |
+
{"current_steps": 146, "total_steps": 519, "loss": 0.8812, "lr": 9.052978590897964e-06, "epoch": 0.8402877697841726, "percentage": 28.13, "elapsed_time": "4:32:19", "remaining_time": "11:35:45"}
|
147 |
+
{"current_steps": 147, "total_steps": 519, "loss": 0.8282, "lr": 9.033189643270139e-06, "epoch": 0.8460431654676259, "percentage": 28.32, "elapsed_time": "4:33:48", "remaining_time": "11:32:54"}
|
148 |
+
{"current_steps": 148, "total_steps": 519, "loss": 0.8924, "lr": 9.013218174579189e-06, "epoch": 0.8517985611510791, "percentage": 28.52, "elapsed_time": "4:35:38", "remaining_time": "11:30:58"}
|
149 |
+
{"current_steps": 149, "total_steps": 519, "loss": 0.8948, "lr": 8.993065088629304e-06, "epoch": 0.8575539568345324, "percentage": 28.71, "elapsed_time": "4:37:35", "remaining_time": "11:29:20"}
|
150 |
+
{"current_steps": 150, "total_steps": 519, "loss": 0.9222, "lr": 8.972731297443722e-06, "epoch": 0.8633093525179856, "percentage": 28.9, "elapsed_time": "4:39:45", "remaining_time": "11:28:11"}
|
151 |
+
{"current_steps": 151, "total_steps": 519, "loss": 0.9079, "lr": 8.95221772122345e-06, "epoch": 0.8690647482014389, "percentage": 29.09, "elapsed_time": "4:41:59", "remaining_time": "11:27:15"}
|
152 |
+
{"current_steps": 152, "total_steps": 519, "loss": 0.8233, "lr": 8.931525288305633e-06, "epoch": 0.874820143884892, "percentage": 29.29, "elapsed_time": "4:43:50", "remaining_time": "11:25:19"}
|
153 |
+
{"current_steps": 153, "total_steps": 519, "loss": 0.9171, "lr": 8.910654935121528e-06, "epoch": 0.8805755395683453, "percentage": 29.48, "elapsed_time": "4:45:41", "remaining_time": "11:23:25"}
|
154 |
+
{"current_steps": 154, "total_steps": 519, "loss": 0.9143, "lr": 8.889607606154132e-06, "epoch": 0.8863309352517985, "percentage": 29.67, "elapsed_time": "4:47:47", "remaining_time": "11:22:05"}
|
155 |
+
{"current_steps": 155, "total_steps": 519, "loss": 0.9422, "lr": 8.868384253895445e-06, "epoch": 0.8920863309352518, "percentage": 29.87, "elapsed_time": "4:49:26", "remaining_time": "11:19:43"}
|
156 |
+
{"current_steps": 156, "total_steps": 519, "loss": 0.9429, "lr": 8.846985838803357e-06, "epoch": 0.897841726618705, "percentage": 30.06, "elapsed_time": "4:51:21", "remaining_time": "11:17:58"}
|
157 |
+
{"current_steps": 157, "total_steps": 519, "loss": 0.8393, "lr": 8.825413329258187e-06, "epoch": 0.9035971223021583, "percentage": 30.25, "elapsed_time": "4:52:50", "remaining_time": "11:15:12"}
|
158 |
+
{"current_steps": 158, "total_steps": 519, "loss": 0.8822, "lr": 8.803667701518857e-06, "epoch": 0.9093525179856116, "percentage": 30.44, "elapsed_time": "4:55:03", "remaining_time": "11:14:08"}
|
159 |
+
{"current_steps": 159, "total_steps": 519, "loss": 0.8766, "lr": 8.781749939678712e-06, "epoch": 0.9151079136690647, "percentage": 30.64, "elapsed_time": "4:57:08", "remaining_time": "11:12:46"}
|
160 |
+
{"current_steps": 160, "total_steps": 519, "loss": 0.8679, "lr": 8.759661035620992e-06, "epoch": 0.920863309352518, "percentage": 30.83, "elapsed_time": "4:59:10", "remaining_time": "11:11:15"}
|
161 |
+
{"current_steps": 161, "total_steps": 519, "loss": 0.9487, "lr": 8.73740198897393e-06, "epoch": 0.9266187050359712, "percentage": 31.02, "elapsed_time": "5:01:11", "remaining_time": "11:09:43"}
|
162 |
+
{"current_steps": 162, "total_steps": 519, "loss": 0.9371, "lr": 8.714973807065525e-06, "epoch": 0.9323741007194245, "percentage": 31.21, "elapsed_time": "5:03:16", "remaining_time": "11:08:20"}
|
163 |
+
{"current_steps": 163, "total_steps": 519, "loss": 0.8064, "lr": 8.69237750487796e-06, "epoch": 0.9381294964028777, "percentage": 31.41, "elapsed_time": "5:05:16", "remaining_time": "11:06:44"}
|
164 |
+
{"current_steps": 164, "total_steps": 519, "loss": 0.9083, "lr": 8.669614105001652e-06, "epoch": 0.943884892086331, "percentage": 31.6, "elapsed_time": "5:06:57", "remaining_time": "11:04:26"}
|
165 |
+
{"current_steps": 165, "total_steps": 519, "loss": 0.9082, "lr": 8.646684637588992e-06, "epoch": 0.9496402877697842, "percentage": 31.79, "elapsed_time": "5:08:41", "remaining_time": "11:02:16"}
|
166 |
+
{"current_steps": 166, "total_steps": 519, "loss": 0.8174, "lr": 8.623590140307715e-06, "epoch": 0.9553956834532374, "percentage": 31.98, "elapsed_time": "5:11:33", "remaining_time": "11:02:32"}
|
167 |
+
{"current_steps": 167, "total_steps": 519, "loss": 0.8157, "lr": 8.600331658293948e-06, "epoch": 0.9611510791366906, "percentage": 32.18, "elapsed_time": "5:13:06", "remaining_time": "10:59:58"}
|
168 |
+
{"current_steps": 168, "total_steps": 519, "loss": 0.7305, "lr": 8.576910244104905e-06, "epoch": 0.9669064748201439, "percentage": 32.37, "elapsed_time": "5:14:19", "remaining_time": "10:56:42"}
|
169 |
+
{"current_steps": 169, "total_steps": 519, "loss": 0.8627, "lr": 8.553326957671264e-06, "epoch": 0.9726618705035971, "percentage": 32.56, "elapsed_time": "5:16:20", "remaining_time": "10:55:08"}
|
170 |
+
{"current_steps": 170, "total_steps": 519, "loss": 0.9088, "lr": 8.529582866249187e-06, "epoch": 0.9784172661870504, "percentage": 32.76, "elapsed_time": "5:18:06", "remaining_time": "10:53:03"}
|
171 |
+
{"current_steps": 171, "total_steps": 519, "loss": 0.8252, "lr": 8.50567904437203e-06, "epoch": 0.9841726618705036, "percentage": 32.95, "elapsed_time": "5:19:35", "remaining_time": "10:50:24"}
|
172 |
+
{"current_steps": 172, "total_steps": 519, "loss": 1.0713, "lr": 8.48161657380172e-06, "epoch": 0.9899280575539569, "percentage": 33.14, "elapsed_time": "5:21:17", "remaining_time": "10:48:10"}
|
173 |
+
{"current_steps": 173, "total_steps": 519, "loss": 0.9502, "lr": 8.457396543479787e-06, "epoch": 0.99568345323741, "percentage": 33.33, "elapsed_time": "5:22:35", "remaining_time": "10:45:11"}
|
174 |
+
{"current_steps": 174, "total_steps": 519, "loss": 0.9039, "lr": 8.433020049478093e-06, "epoch": 1.0, "percentage": 33.53, "elapsed_time": "5:23:56", "remaining_time": "10:42:18"}
|
175 |
+
{"current_steps": 175, "total_steps": 519, "loss": 0.6914, "lr": 8.408488194949229e-06, "epoch": 1.0057553956834533, "percentage": 33.72, "elapsed_time": "5:26:04", "remaining_time": "10:40:58"}
|
176 |
+
{"current_steps": 176, "total_steps": 519, "loss": 0.6407, "lr": 8.383802090076589e-06, "epoch": 1.0115107913669066, "percentage": 33.91, "elapsed_time": "5:27:53", "remaining_time": "10:39:00"}
|
177 |
+
{"current_steps": 177, "total_steps": 519, "loss": 0.7788, "lr": 8.358962852024128e-06, "epoch": 1.0172661870503596, "percentage": 34.1, "elapsed_time": "5:30:21", "remaining_time": "10:38:18"}
|
178 |
+
{"current_steps": 178, "total_steps": 519, "loss": 0.7455, "lr": 8.333971604885817e-06, "epoch": 1.023021582733813, "percentage": 34.3, "elapsed_time": "5:32:34", "remaining_time": "10:37:06"}
|
179 |
+
{"current_steps": 179, "total_steps": 519, "loss": 0.6916, "lr": 8.308829479634753e-06, "epoch": 1.0287769784172662, "percentage": 34.49, "elapsed_time": "5:34:10", "remaining_time": "10:34:45"}
|
180 |
+
{"current_steps": 180, "total_steps": 519, "loss": 0.7177, "lr": 8.283537614071987e-06, "epoch": 1.0345323741007195, "percentage": 34.68, "elapsed_time": "5:36:25", "remaining_time": "10:33:35"}
|
181 |
+
{"current_steps": 181, "total_steps": 519, "loss": 0.7206, "lr": 8.258097152775045e-06, "epoch": 1.0402877697841726, "percentage": 34.87, "elapsed_time": "5:38:56", "remaining_time": "10:32:57"}
|
182 |
+
{"current_steps": 182, "total_steps": 519, "loss": 0.7317, "lr": 8.232509247046106e-06, "epoch": 1.0460431654676259, "percentage": 35.07, "elapsed_time": "5:40:59", "remaining_time": "10:31:23"}
|
183 |
+
{"current_steps": 183, "total_steps": 519, "loss": 0.7968, "lr": 8.206775054859914e-06, "epoch": 1.0517985611510792, "percentage": 35.26, "elapsed_time": "5:42:31", "remaining_time": "10:28:54"}
|
184 |
+
{"current_steps": 184, "total_steps": 519, "loss": 0.7372, "lr": 8.180895740811381e-06, "epoch": 1.0575539568345325, "percentage": 35.45, "elapsed_time": "5:44:22", "remaining_time": "10:26:58"}
|
185 |
+
{"current_steps": 185, "total_steps": 519, "loss": 0.7368, "lr": 8.154872476062868e-06, "epoch": 1.0633093525179855, "percentage": 35.65, "elapsed_time": "5:46:12", "remaining_time": "10:25:01"}
|
186 |
+
{"current_steps": 186, "total_steps": 519, "loss": 0.6986, "lr": 8.128706438291193e-06, "epoch": 1.0690647482014388, "percentage": 35.84, "elapsed_time": "5:48:04", "remaining_time": "10:23:10"}
|
187 |
+
{"current_steps": 187, "total_steps": 519, "loss": 0.6884, "lr": 8.102398811634338e-06, "epoch": 1.074820143884892, "percentage": 36.03, "elapsed_time": "5:49:52", "remaining_time": "10:21:10"}
|
188 |
+
{"current_steps": 188, "total_steps": 519, "loss": 0.7078, "lr": 8.075950786637847e-06, "epoch": 1.0805755395683454, "percentage": 36.22, "elapsed_time": "5:52:02", "remaining_time": "10:19:49"}
|
189 |
+
{"current_steps": 189, "total_steps": 519, "loss": 0.6763, "lr": 8.049363560200972e-06, "epoch": 1.0863309352517985, "percentage": 36.42, "elapsed_time": "5:53:37", "remaining_time": "10:17:27"}
|
190 |
+
{"current_steps": 190, "total_steps": 519, "loss": 0.6993, "lr": 8.022638335522484e-06, "epoch": 1.0920863309352518, "percentage": 36.61, "elapsed_time": "5:55:28", "remaining_time": "10:15:31"}
|
191 |
+
{"current_steps": 191, "total_steps": 519, "loss": 0.7091, "lr": 7.995776322046236e-06, "epoch": 1.097841726618705, "percentage": 36.8, "elapsed_time": "5:57:30", "remaining_time": "10:13:55"}
|
192 |
+
{"current_steps": 192, "total_steps": 519, "loss": 0.7383, "lr": 7.968778735406426e-06, "epoch": 1.1035971223021583, "percentage": 36.99, "elapsed_time": "5:59:32", "remaining_time": "10:12:20"}
|
193 |
+
{"current_steps": 193, "total_steps": 519, "loss": 0.6835, "lr": 7.941646797372584e-06, "epoch": 1.1093525179856114, "percentage": 37.19, "elapsed_time": "6:01:27", "remaining_time": "10:10:32"}
|
194 |
+
{"current_steps": 194, "total_steps": 519, "loss": 0.724, "lr": 7.914381735794282e-06, "epoch": 1.1151079136690647, "percentage": 37.38, "elapsed_time": "6:03:10", "remaining_time": "10:08:24"}
|
195 |
+
{"current_steps": 195, "total_steps": 519, "loss": 0.7099, "lr": 7.886984784545565e-06, "epoch": 1.120863309352518, "percentage": 37.57, "elapsed_time": "6:04:40", "remaining_time": "10:05:55"}
|
196 |
+
{"current_steps": 196, "total_steps": 519, "loss": 0.7086, "lr": 7.859457183469119e-06, "epoch": 1.1266187050359713, "percentage": 37.76, "elapsed_time": "6:06:24", "remaining_time": "10:03:49"}
|
197 |
+
{"current_steps": 197, "total_steps": 519, "loss": 0.7393, "lr": 7.831800178320153e-06, "epoch": 1.1323741007194243, "percentage": 37.96, "elapsed_time": "6:08:21", "remaining_time": "10:02:04"}
|
198 |
+
{"current_steps": 198, "total_steps": 519, "loss": 0.6799, "lr": 7.804015020710028e-06, "epoch": 1.1381294964028776, "percentage": 38.15, "elapsed_time": "6:10:05", "remaining_time": "9:59:59"}
|
199 |
+
{"current_steps": 199, "total_steps": 519, "loss": 0.6896, "lr": 7.776102968049616e-06, "epoch": 1.143884892086331, "percentage": 38.34, "elapsed_time": "6:12:00", "remaining_time": "9:58:11"}
|
200 |
+
{"current_steps": 200, "total_steps": 519, "loss": 0.698, "lr": 7.748065283492397e-06, "epoch": 1.1496402877697842, "percentage": 38.54, "elapsed_time": "6:13:22", "remaining_time": "9:55:32"}
|
201 |
+
{"current_steps": 201, "total_steps": 519, "loss": 0.6811, "lr": 7.719903235877289e-06, "epoch": 1.1553956834532375, "percentage": 38.73, "elapsed_time": "6:16:52", "remaining_time": "9:56:15"}
|
training_args.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:afeaa72e60317db0103ce9f656b57e6c0b760412d9d115259a64bbae79d99eab
|
3 |
+
size 7864
|
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
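The trainer_log.jsonl added in this commit is a plain JSON-lines file: one record per optimizer step carrying current_steps, total_steps, loss, lr, epoch, percentage, elapsed_time, and remaining_time. As a minimal illustrative sketch (not part of the commit itself; it assumes a local Python environment with matplotlib installed), the loss curve can be inspected like this:

import json
import matplotlib.pyplot as plt  # assumption: matplotlib is available locally

steps, losses = [], []
with open("trainer_log.jsonl") as f:      # the log file shipped in this commit
    for line in f:
        record = json.loads(line)
        if "loss" in record:              # skip any records without a loss value
            steps.append(record["current_steps"])
            losses.append(record["loss"])

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.show()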