simonycl committed on
Commit
897c1f4
·
verified ·
1 Parent(s): 3aa2ee6

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: other
4
+ base_model: google/gemma-3-12b-it
5
+ tags:
6
+ - llama-factory
7
+ - full
8
+ - generated_from_trainer
9
+ model-index:
10
+ - name: sft
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # sft
18
+
19
+ This model is a fine-tuned version of [google/gemma-3-12b-it](https://huggingface.co/google/gemma-3-12b-it) on the cmv-gemma-3-27b-it dataset.
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 1e-05
39
+ - train_batch_size: 1
40
+ - eval_batch_size: 8
41
+ - seed: 42
42
+ - distributed_type: multi-GPU
43
+ - num_devices: 2
44
+ - gradient_accumulation_steps: 32
45
+ - total_train_batch_size: 64
46
+ - total_eval_batch_size: 16
47
+ - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
48
+ - lr_scheduler_type: cosine
49
+ - lr_scheduler_warmup_ratio: 0.05
50
+ - num_epochs: 1.0
51
+
52
+ ### Training results
53
+
54
+
55
+
56
+ ### Framework versions
57
+
58
+ - Transformers 4.51.3
59
+ - Pytorch 2.6.0+cu124
60
+ - Datasets 3.3.2
61
+ - Tokenizers 0.21.0
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<image_soft_token>": 262144
3
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9984162179284131,
3
+ "total_flos": 318229560426496.0,
4
+ "train_loss": 0.8606352945269667,
5
+ "train_runtime": 15262.586,
6
+ "train_samples_per_second": 1.655,
7
+ "train_steps_per_second": 0.026
8
+ }
chat_template.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n"
3
+ }
config.json ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Gemma3ForConditionalGeneration"
4
+ ],
5
+ "boi_token_index": 255999,
6
+ "eoi_token_index": 256000,
7
+ "eos_token_id": [
8
+ 1,
9
+ 106
10
+ ],
11
+ "hidden_size": 3840,
12
+ "image_token_index": 262144,
13
+ "initializer_range": 0.02,
14
+ "mm_tokens_per_image": 256,
15
+ "model_type": "gemma3",
16
+ "text_config": {
17
+ "attention_bias": false,
18
+ "attention_dropout": 0.0,
19
+ "attn_logit_softcapping": null,
20
+ "cache_implementation": "hybrid",
21
+ "final_logit_softcapping": null,
22
+ "head_dim": 256,
23
+ "hidden_activation": "gelu_pytorch_tanh",
24
+ "hidden_size": 3840,
25
+ "initializer_range": 0.02,
26
+ "intermediate_size": 15360,
27
+ "max_position_embeddings": 131072,
28
+ "model_type": "gemma3_text",
29
+ "num_attention_heads": 16,
30
+ "num_hidden_layers": 48,
31
+ "num_key_value_heads": 8,
32
+ "query_pre_attn_scalar": 256,
33
+ "rms_norm_eps": 1e-06,
34
+ "rope_local_base_freq": 10000.0,
35
+ "rope_scaling": {
36
+ "factor": 8.0,
37
+ "rope_type": "linear"
38
+ },
39
+ "rope_theta": 1000000.0,
40
+ "sliding_window": 1024,
41
+ "sliding_window_pattern": 6,
42
+ "torch_dtype": "float32",
43
+ "use_cache": false,
44
+ "vocab_size": 262208
45
+ },
46
+ "torch_dtype": "bfloat16",
47
+ "transformers_version": "4.51.3",
48
+ "use_cache": false,
49
+ "vision_config": {
50
+ "attention_dropout": 0.0,
51
+ "hidden_act": "gelu_pytorch_tanh",
52
+ "hidden_size": 1152,
53
+ "image_size": 896,
54
+ "intermediate_size": 4304,
55
+ "layer_norm_eps": 1e-06,
56
+ "model_type": "siglip_vision_model",
57
+ "num_attention_heads": 16,
58
+ "num_channels": 3,
59
+ "num_hidden_layers": 27,
60
+ "patch_size": 14,
61
+ "torch_dtype": "float32",
62
+ "vision_use_head": false
63
+ }
64
+ }
generation_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 2,
3
+ "cache_implementation": "hybrid",
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 1,
7
+ 106
8
+ ],
9
+ "pad_token_id": 0,
10
+ "top_k": 64,
11
+ "top_p": 0.95,
12
+ "transformers_version": "4.51.3"
13
+ }
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a62761566b053ef2831e4489457c6248eba1a1232ad004b3c6385cd6a5e0e380
3
+ size 4979902192
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba5f916a5feb17697cdc3b7510b33fb46afc4a649681f803ed33ff4b358392e1
3
+ size 4931296592
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ad2fb33ab5a677b09c2e5da2ddf080d4a2c61e2f0c381e2c57bcbd5f9c78c47
3
+ size 4931296656
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0469bff466c0ecb1f11f489a61494a0fadfb907f104e8d3a8f730b7d76c8383
3
+ size 4931296656
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:172fffe7f9dbbd0c241b4b40e537859b15c05c910ada335790dadbacd192871f
3
+ size 4601000928
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_convert_rgb": null,
3
+ "do_normalize": true,
4
+ "do_pan_and_scan": null,
5
+ "do_rescale": true,
6
+ "do_resize": true,
7
+ "image_mean": [
8
+ 0.5,
9
+ 0.5,
10
+ 0.5
11
+ ],
12
+ "image_processor_type": "Gemma3ImageProcessor",
13
+ "image_seq_length": 256,
14
+ "image_std": [
15
+ 0.5,
16
+ 0.5,
17
+ 0.5
18
+ ],
19
+ "pan_and_scan_max_num_crops": null,
20
+ "pan_and_scan_min_crop_size": null,
21
+ "pan_and_scan_min_ratio_to_activate": null,
22
+ "processor_class": "Gemma3Processor",
23
+ "resample": 2,
24
+ "rescale_factor": 0.00392156862745098,
25
+ "size": {
26
+ "height": 896,
27
+ "width": 896
28
+ }
29
+ }
processor_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "image_seq_length": 256,
3
+ "processor_class": "Gemma3Processor"
4
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "boi_token": "<start_of_image>",
3
+ "bos_token": {
4
+ "content": "<bos>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ "eoi_token": "<end_of_image>",
11
+ "eos_token": {
12
+ "content": "<end_of_turn>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "image_token": "<image_soft_token>",
19
+ "pad_token": {
20
+ "content": "<pad>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "unk_token": {
27
+ "content": "<unk>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ }
33
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
3
+ size 33384568
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
3
+ size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9984162179284131,
3
+ "total_flos": 318229560426496.0,
4
+ "train_loss": 0.8606352945269667,
5
+ "train_runtime": 15262.586,
6
+ "train_samples_per_second": 1.655,
7
+ "train_steps_per_second": 0.026
8
+ }
trainer_log.jsonl ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"current_steps": 10, "total_steps": 394, "loss": 1.1276, "lr": 4.5e-06, "epoch": 0.025340513145391194, "percentage": 2.54, "elapsed_time": "0:06:41", "remaining_time": "4:16:49"}
2
+ {"current_steps": 20, "total_steps": 394, "loss": 0.9532, "lr": 9.5e-06, "epoch": 0.05068102629078239, "percentage": 5.08, "elapsed_time": "0:12:57", "remaining_time": "4:02:13"}
3
+ {"current_steps": 30, "total_steps": 394, "loss": 0.9247, "lr": 9.985718470743916e-06, "epoch": 0.07602153943617358, "percentage": 7.61, "elapsed_time": "0:19:23", "remaining_time": "3:55:17"}
4
+ {"current_steps": 40, "total_steps": 394, "loss": 0.9131, "lr": 9.936454954953108e-06, "epoch": 0.10136205258156478, "percentage": 10.15, "elapsed_time": "0:25:41", "remaining_time": "3:47:19"}
5
+ {"current_steps": 50, "total_steps": 394, "loss": 0.9038, "lr": 9.852380451890723e-06, "epoch": 0.12670256572695598, "percentage": 12.69, "elapsed_time": "0:32:08", "remaining_time": "3:41:10"}
6
+ {"current_steps": 60, "total_steps": 394, "loss": 0.9032, "lr": 9.734087839742152e-06, "epoch": 0.15204307887234716, "percentage": 15.23, "elapsed_time": "0:38:31", "remaining_time": "3:34:26"}
7
+ {"current_steps": 70, "total_steps": 394, "loss": 0.8988, "lr": 9.58241129660755e-06, "epoch": 0.17738359201773837, "percentage": 17.77, "elapsed_time": "0:44:53", "remaining_time": "3:27:46"}
8
+ {"current_steps": 80, "total_steps": 394, "loss": 0.9, "lr": 9.398420418028789e-06, "epoch": 0.20272410516312955, "percentage": 20.3, "elapsed_time": "0:51:17", "remaining_time": "3:21:19"}
9
+ {"current_steps": 90, "total_steps": 394, "loss": 0.889, "lr": 9.183412674395193e-06, "epoch": 0.22806461830852076, "percentage": 22.84, "elapsed_time": "0:57:34", "remaining_time": "3:14:27"}
10
+ {"current_steps": 100, "total_steps": 394, "loss": 0.8862, "lr": 8.938904261417088e-06, "epoch": 0.25340513145391197, "percentage": 25.38, "elapsed_time": "1:04:03", "remaining_time": "3:08:20"}
11
+ {"current_steps": 110, "total_steps": 394, "loss": 0.8874, "lr": 8.666619408187953e-06, "epoch": 0.2787456445993031, "percentage": 27.92, "elapsed_time": "1:10:28", "remaining_time": "3:01:57"}
12
+ {"current_steps": 120, "total_steps": 394, "loss": 0.8809, "lr": 8.368478218232787e-06, "epoch": 0.30408615774469433, "percentage": 30.46, "elapsed_time": "1:16:45", "remaining_time": "2:55:16"}
13
+ {"current_steps": 130, "total_steps": 394, "loss": 0.8759, "lr": 8.046583129285422e-06, "epoch": 0.32942667089008554, "percentage": 32.99, "elapsed_time": "1:23:10", "remaining_time": "2:48:55"}
14
+ {"current_steps": 140, "total_steps": 394, "loss": 0.8684, "lr": 7.703204087277989e-06, "epoch": 0.35476718403547675, "percentage": 35.53, "elapsed_time": "1:29:30", "remaining_time": "2:42:24"}
15
+ {"current_steps": 150, "total_steps": 394, "loss": 0.8676, "lr": 7.340762539092858e-06, "epoch": 0.3801076971808679, "percentage": 38.07, "elapsed_time": "1:35:51", "remaining_time": "2:35:55"}
16
+ {"current_steps": 160, "total_steps": 394, "loss": 0.8718, "lr": 6.961814356957308e-06, "epoch": 0.4054482103262591, "percentage": 40.61, "elapsed_time": "1:42:19", "remaining_time": "2:29:38"}
17
+ {"current_steps": 170, "total_steps": 394, "loss": 0.8613, "lr": 6.569031814894962e-06, "epoch": 0.4307887234716503, "percentage": 43.15, "elapsed_time": "1:48:42", "remaining_time": "2:23:14"}
18
+ {"current_steps": 180, "total_steps": 394, "loss": 0.8587, "lr": 6.165184744332824e-06, "epoch": 0.4561292366170415, "percentage": 45.69, "elapsed_time": "1:55:09", "remaining_time": "2:16:54"}
19
+ {"current_steps": 190, "total_steps": 394, "loss": 0.8496, "lr": 5.753121001751161e-06, "epoch": 0.4814697497624327, "percentage": 48.22, "elapsed_time": "2:01:32", "remaining_time": "2:10:30"}
20
+ {"current_steps": 200, "total_steps": 394, "loss": 0.8507, "lr": 5.335746386114814e-06, "epoch": 0.5068102629078239, "percentage": 50.76, "elapsed_time": "2:07:56", "remaining_time": "2:04:06"}
21
+ {"current_steps": 210, "total_steps": 394, "loss": 0.8465, "lr": 4.9160041477046e-06, "epoch": 0.532150776053215, "percentage": 53.3, "elapsed_time": "2:14:18", "remaining_time": "1:57:40"}
22
+ {"current_steps": 220, "total_steps": 394, "loss": 0.8543, "lr": 4.4968542328488e-06, "epoch": 0.5574912891986062, "percentage": 55.84, "elapsed_time": "2:20:36", "remaining_time": "1:51:12"}
23
+ {"current_steps": 230, "total_steps": 394, "loss": 0.841, "lr": 4.081252410917148e-06, "epoch": 0.5828318023439975, "percentage": 58.38, "elapsed_time": "2:26:59", "remaining_time": "1:44:48"}
24
+ {"current_steps": 240, "total_steps": 394, "loss": 0.8358, "lr": 3.6721294307699786e-06, "epoch": 0.6081723154893887, "percentage": 60.91, "elapsed_time": "2:33:27", "remaining_time": "1:38:28"}
25
+ {"current_steps": 250, "total_steps": 394, "loss": 0.8242, "lr": 3.272370353647465e-06, "epoch": 0.6335128286347799, "percentage": 63.45, "elapsed_time": "2:39:46", "remaining_time": "1:32:01"}
26
+ {"current_steps": 260, "total_steps": 394, "loss": 0.83, "lr": 2.8847942082397112e-06, "epoch": 0.6588533417801711, "percentage": 65.99, "elapsed_time": "2:46:09", "remaining_time": "1:25:38"}
27
+ {"current_steps": 270, "total_steps": 394, "loss": 0.8256, "lr": 2.512134111406422e-06, "epoch": 0.6841938549255623, "percentage": 68.53, "elapsed_time": "2:52:37", "remaining_time": "1:19:16"}
28
+ {"current_steps": 280, "total_steps": 394, "loss": 0.8219, "lr": 2.1570179947312674e-06, "epoch": 0.7095343680709535, "percentage": 71.07, "elapsed_time": "2:59:01", "remaining_time": "1:12:53"}
29
+ {"current_steps": 290, "total_steps": 394, "loss": 0.8211, "lr": 1.8219500728237849e-06, "epoch": 0.7348748812163446, "percentage": 73.6, "elapsed_time": "3:05:29", "remaining_time": "1:06:31"}
30
+ {"current_steps": 300, "total_steps": 394, "loss": 0.8124, "lr": 1.509293184050995e-06, "epoch": 0.7602153943617358, "percentage": 76.14, "elapsed_time": "3:11:45", "remaining_time": "1:00:04"}
31
+ {"current_steps": 310, "total_steps": 394, "loss": 0.8189, "lr": 1.2212521282287093e-06, "epoch": 0.785555907507127, "percentage": 78.68, "elapsed_time": "3:18:13", "remaining_time": "0:53:42"}
32
+ {"current_steps": 320, "total_steps": 394, "loss": 0.8151, "lr": 9.59858118772105e-07, "epoch": 0.8108964206525182, "percentage": 81.22, "elapsed_time": "3:24:35", "remaining_time": "0:47:18"}
33
+ {"current_steps": 330, "total_steps": 394, "loss": 0.8189, "lr": 7.269544589461968e-07, "epoch": 0.8362369337979094, "percentage": 83.76, "elapsed_time": "3:30:52", "remaining_time": "0:40:53"}
34
+ {"current_steps": 340, "total_steps": 394, "loss": 0.8107, "lr": 5.241835432246888e-07, "epoch": 0.8615774469433006, "percentage": 86.29, "elapsed_time": "3:37:16", "remaining_time": "0:34:30"}
35
+ {"current_steps": 350, "total_steps": 394, "loss": 0.8062, "lr": 3.5297527542127675e-07, "epoch": 0.8869179600886918, "percentage": 88.83, "elapsed_time": "3:43:41", "remaining_time": "0:28:07"}
36
+ {"current_steps": 360, "total_steps": 394, "loss": 0.8155, "lr": 2.1453698526664513e-07, "epoch": 0.912258473234083, "percentage": 91.37, "elapsed_time": "3:50:06", "remaining_time": "0:21:43"}
37
+ {"current_steps": 370, "total_steps": 394, "loss": 0.8078, "lr": 1.0984491453762402e-07, "epoch": 0.9375989863794741, "percentage": 93.91, "elapsed_time": "3:56:28", "remaining_time": "0:15:20"}
38
+ {"current_steps": 380, "total_steps": 394, "loss": 0.802, "lr": 3.963733277679904e-08, "epoch": 0.9629394995248653, "percentage": 96.45, "elapsed_time": "4:02:52", "remaining_time": "0:08:56"}
39
+ {"current_steps": 390, "total_steps": 394, "loss": 0.8076, "lr": 4.409331149256013e-09, "epoch": 0.9882800126702566, "percentage": 98.98, "elapsed_time": "4:09:11", "remaining_time": "0:02:33"}
40
+ {"current_steps": 394, "total_steps": 394, "epoch": 0.9984162179284131, "percentage": 100.0, "elapsed_time": "4:14:21", "remaining_time": "0:00:00"}
trainer_state.json ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_global_step": null,
3
+ "best_metric": null,
4
+ "best_model_checkpoint": null,
5
+ "epoch": 0.9984162179284131,
6
+ "eval_steps": 500,
7
+ "global_step": 394,
8
+ "is_hyper_param_search": false,
9
+ "is_local_process_zero": true,
10
+ "is_world_process_zero": true,
11
+ "log_history": [
12
+ {
13
+ "epoch": 0.025340513145391194,
14
+ "grad_norm": 7.014422278343604,
15
+ "learning_rate": 4.5e-06,
16
+ "loss": 1.1276,
17
+ "step": 10
18
+ },
19
+ {
20
+ "epoch": 0.05068102629078239,
21
+ "grad_norm": 3.950172585471577,
22
+ "learning_rate": 9.5e-06,
23
+ "loss": 0.9532,
24
+ "step": 20
25
+ },
26
+ {
27
+ "epoch": 0.07602153943617358,
28
+ "grad_norm": 2.8852145535313203,
29
+ "learning_rate": 9.985718470743916e-06,
30
+ "loss": 0.9247,
31
+ "step": 30
32
+ },
33
+ {
34
+ "epoch": 0.10136205258156478,
35
+ "grad_norm": 4.372398694120704,
36
+ "learning_rate": 9.936454954953108e-06,
37
+ "loss": 0.9131,
38
+ "step": 40
39
+ },
40
+ {
41
+ "epoch": 0.12670256572695598,
42
+ "grad_norm": 2.678600773280688,
43
+ "learning_rate": 9.852380451890723e-06,
44
+ "loss": 0.9038,
45
+ "step": 50
46
+ },
47
+ {
48
+ "epoch": 0.15204307887234716,
49
+ "grad_norm": 3.1616554933826584,
50
+ "learning_rate": 9.734087839742152e-06,
51
+ "loss": 0.9032,
52
+ "step": 60
53
+ },
54
+ {
55
+ "epoch": 0.17738359201773837,
56
+ "grad_norm": 2.711129850639537,
57
+ "learning_rate": 9.58241129660755e-06,
58
+ "loss": 0.8988,
59
+ "step": 70
60
+ },
61
+ {
62
+ "epoch": 0.20272410516312955,
63
+ "grad_norm": 2.816882812285983,
64
+ "learning_rate": 9.398420418028789e-06,
65
+ "loss": 0.9,
66
+ "step": 80
67
+ },
68
+ {
69
+ "epoch": 0.22806461830852076,
70
+ "grad_norm": 2.4491670100971437,
71
+ "learning_rate": 9.183412674395193e-06,
72
+ "loss": 0.889,
73
+ "step": 90
74
+ },
75
+ {
76
+ "epoch": 0.25340513145391197,
77
+ "grad_norm": 2.3671858961630132,
78
+ "learning_rate": 8.938904261417088e-06,
79
+ "loss": 0.8862,
80
+ "step": 100
81
+ },
82
+ {
83
+ "epoch": 0.2787456445993031,
84
+ "grad_norm": 25.362434554086317,
85
+ "learning_rate": 8.666619408187953e-06,
86
+ "loss": 0.8874,
87
+ "step": 110
88
+ },
89
+ {
90
+ "epoch": 0.30408615774469433,
91
+ "grad_norm": 2.435185075586954,
92
+ "learning_rate": 8.368478218232787e-06,
93
+ "loss": 0.8809,
94
+ "step": 120
95
+ },
96
+ {
97
+ "epoch": 0.32942667089008554,
98
+ "grad_norm": 2.3423241237120522,
99
+ "learning_rate": 8.046583129285422e-06,
100
+ "loss": 0.8759,
101
+ "step": 130
102
+ },
103
+ {
104
+ "epoch": 0.35476718403547675,
105
+ "grad_norm": 2.295983766372591,
106
+ "learning_rate": 7.703204087277989e-06,
107
+ "loss": 0.8684,
108
+ "step": 140
109
+ },
110
+ {
111
+ "epoch": 0.3801076971808679,
112
+ "grad_norm": 2.8544179633447255,
113
+ "learning_rate": 7.340762539092858e-06,
114
+ "loss": 0.8676,
115
+ "step": 150
116
+ },
117
+ {
118
+ "epoch": 0.4054482103262591,
119
+ "grad_norm": 2.2374422127818963,
120
+ "learning_rate": 6.961814356957308e-06,
121
+ "loss": 0.8718,
122
+ "step": 160
123
+ },
124
+ {
125
+ "epoch": 0.4307887234716503,
126
+ "grad_norm": 2.1472864212507976,
127
+ "learning_rate": 6.569031814894962e-06,
128
+ "loss": 0.8613,
129
+ "step": 170
130
+ },
131
+ {
132
+ "epoch": 0.4561292366170415,
133
+ "grad_norm": 2.2467067956901183,
134
+ "learning_rate": 6.165184744332824e-06,
135
+ "loss": 0.8587,
136
+ "step": 180
137
+ },
138
+ {
139
+ "epoch": 0.4814697497624327,
140
+ "grad_norm": 2.2528338735220905,
141
+ "learning_rate": 5.753121001751161e-06,
142
+ "loss": 0.8496,
143
+ "step": 190
144
+ },
145
+ {
146
+ "epoch": 0.5068102629078239,
147
+ "grad_norm": 2.4179129846566636,
148
+ "learning_rate": 5.335746386114814e-06,
149
+ "loss": 0.8507,
150
+ "step": 200
151
+ },
152
+ {
153
+ "epoch": 0.532150776053215,
154
+ "grad_norm": 16.529801921641155,
155
+ "learning_rate": 4.9160041477046e-06,
156
+ "loss": 0.8465,
157
+ "step": 210
158
+ },
159
+ {
160
+ "epoch": 0.5574912891986062,
161
+ "grad_norm": 2.1966500837147556,
162
+ "learning_rate": 4.4968542328488e-06,
163
+ "loss": 0.8543,
164
+ "step": 220
165
+ },
166
+ {
167
+ "epoch": 0.5828318023439975,
168
+ "grad_norm": 2.2370934180241986,
169
+ "learning_rate": 4.081252410917148e-06,
170
+ "loss": 0.841,
171
+ "step": 230
172
+ },
173
+ {
174
+ "epoch": 0.6081723154893887,
175
+ "grad_norm": 2.086887732081978,
176
+ "learning_rate": 3.6721294307699786e-06,
177
+ "loss": 0.8358,
178
+ "step": 240
179
+ },
180
+ {
181
+ "epoch": 0.6335128286347799,
182
+ "grad_norm": 2.263852568221778,
183
+ "learning_rate": 3.272370353647465e-06,
184
+ "loss": 0.8242,
185
+ "step": 250
186
+ },
187
+ {
188
+ "epoch": 0.6588533417801711,
189
+ "grad_norm": 2.24355503619341,
190
+ "learning_rate": 2.8847942082397112e-06,
191
+ "loss": 0.83,
192
+ "step": 260
193
+ },
194
+ {
195
+ "epoch": 0.6841938549255623,
196
+ "grad_norm": 2.2286503006725806,
197
+ "learning_rate": 2.512134111406422e-06,
198
+ "loss": 0.8256,
199
+ "step": 270
200
+ },
201
+ {
202
+ "epoch": 0.7095343680709535,
203
+ "grad_norm": 2.1171293835945155,
204
+ "learning_rate": 2.1570179947312674e-06,
205
+ "loss": 0.8219,
206
+ "step": 280
207
+ },
208
+ {
209
+ "epoch": 0.7348748812163446,
210
+ "grad_norm": 2.087191484034938,
211
+ "learning_rate": 1.8219500728237849e-06,
212
+ "loss": 0.8211,
213
+ "step": 290
214
+ },
215
+ {
216
+ "epoch": 0.7602153943617358,
217
+ "grad_norm": 2.3605396760707866,
218
+ "learning_rate": 1.509293184050995e-06,
219
+ "loss": 0.8124,
220
+ "step": 300
221
+ },
222
+ {
223
+ "epoch": 0.785555907507127,
224
+ "grad_norm": 2.1652486052680384,
225
+ "learning_rate": 1.2212521282287093e-06,
226
+ "loss": 0.8189,
227
+ "step": 310
228
+ },
229
+ {
230
+ "epoch": 0.8108964206525182,
231
+ "grad_norm": 2.04980952594426,
232
+ "learning_rate": 9.59858118772105e-07,
233
+ "loss": 0.8151,
234
+ "step": 320
235
+ },
236
+ {
237
+ "epoch": 0.8362369337979094,
238
+ "grad_norm": 2.0755635014559606,
239
+ "learning_rate": 7.269544589461968e-07,
240
+ "loss": 0.8189,
241
+ "step": 330
242
+ },
243
+ {
244
+ "epoch": 0.8615774469433006,
245
+ "grad_norm": 2.0129993073524095,
246
+ "learning_rate": 5.241835432246888e-07,
247
+ "loss": 0.8107,
248
+ "step": 340
249
+ },
250
+ {
251
+ "epoch": 0.8869179600886918,
252
+ "grad_norm": 2.0409156226901315,
253
+ "learning_rate": 3.5297527542127675e-07,
254
+ "loss": 0.8062,
255
+ "step": 350
256
+ },
257
+ {
258
+ "epoch": 0.912258473234083,
259
+ "grad_norm": 2.3050144167741613,
260
+ "learning_rate": 2.1453698526664513e-07,
261
+ "loss": 0.8155,
262
+ "step": 360
263
+ },
264
+ {
265
+ "epoch": 0.9375989863794741,
266
+ "grad_norm": 2.0906040167387188,
267
+ "learning_rate": 1.0984491453762402e-07,
268
+ "loss": 0.8078,
269
+ "step": 370
270
+ },
271
+ {
272
+ "epoch": 0.9629394995248653,
273
+ "grad_norm": 1.94641588069489,
274
+ "learning_rate": 3.963733277679904e-08,
275
+ "loss": 0.802,
276
+ "step": 380
277
+ },
278
+ {
279
+ "epoch": 0.9882800126702566,
280
+ "grad_norm": 2.333717554003802,
281
+ "learning_rate": 4.409331149256013e-09,
282
+ "loss": 0.8076,
283
+ "step": 390
284
+ },
285
+ {
286
+ "epoch": 0.9984162179284131,
287
+ "step": 394,
288
+ "total_flos": 318229560426496.0,
289
+ "train_loss": 0.8606352945269667,
290
+ "train_runtime": 15262.586,
291
+ "train_samples_per_second": 1.655,
292
+ "train_steps_per_second": 0.026
293
+ }
294
+ ],
295
+ "logging_steps": 10,
296
+ "max_steps": 394,
297
+ "num_input_tokens_seen": 0,
298
+ "num_train_epochs": 1,
299
+ "save_steps": 500,
300
+ "stateful_callbacks": {
301
+ "TrainerControl": {
302
+ "args": {
303
+ "should_epoch_stop": false,
304
+ "should_evaluate": false,
305
+ "should_log": false,
306
+ "should_save": true,
307
+ "should_training_stop": true
308
+ },
309
+ "attributes": {}
310
+ }
311
+ },
312
+ "total_flos": 318229560426496.0,
313
+ "train_batch_size": 1,
314
+ "trial_name": null,
315
+ "trial_params": null
316
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86a068ca4e3136c75b4af0873420c2dd592d64a9c0dcd60f6bb14bee59463a65
3
+ size 7608
training_loss.png ADDED