Commit 4416faf
Parent(s): 18534c6

add weights

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- README.md +20 -0
- adapter_config.json +21 -0
- adapter_model.bin +3 -0
- all_results.json +7 -0
- checkpoint-100/README.md +20 -0
- checkpoint-100/adapter_config.json +21 -0
- checkpoint-100/adapter_model.bin +3 -0
- checkpoint-100/finetuning_args.json +16 -0
- checkpoint-100/optimizer.pt +3 -0
- checkpoint-100/rng_state_0.pth +3 -0
- checkpoint-100/rng_state_1.pth +3 -0
- checkpoint-100/rng_state_2.pth +3 -0
- checkpoint-100/rng_state_3.pth +3 -0
- checkpoint-100/scheduler.pt +3 -0
- checkpoint-100/trainer_state.json +76 -0
- checkpoint-100/training_args.bin +3 -0
- checkpoint-200/README.md +20 -0
- checkpoint-200/adapter_config.json +21 -0
- checkpoint-200/adapter_model.bin +3 -0
- checkpoint-200/finetuning_args.json +16 -0
- checkpoint-200/optimizer.pt +3 -0
- checkpoint-200/rng_state_0.pth +3 -0
- checkpoint-200/rng_state_1.pth +3 -0
- checkpoint-200/rng_state_2.pth +3 -0
- checkpoint-200/rng_state_3.pth +3 -0
- checkpoint-200/scheduler.pt +3 -0
- checkpoint-200/trainer_state.json +136 -0
- checkpoint-200/training_args.bin +3 -0
- checkpoint-300/README.md +20 -0
- checkpoint-300/adapter_config.json +21 -0
- checkpoint-300/adapter_model.bin +3 -0
- checkpoint-300/finetuning_args.json +16 -0
- checkpoint-300/optimizer.pt +3 -0
- checkpoint-300/rng_state_0.pth +3 -0
- checkpoint-300/rng_state_1.pth +3 -0
- checkpoint-300/rng_state_2.pth +3 -0
- checkpoint-300/rng_state_3.pth +3 -0
- checkpoint-300/scheduler.pt +3 -0
- checkpoint-300/trainer_state.json +196 -0
- checkpoint-300/training_args.bin +3 -0
- checkpoint-400/README.md +20 -0
- checkpoint-400/adapter_config.json +21 -0
- checkpoint-400/adapter_model.bin +3 -0
- checkpoint-400/finetuning_args.json +16 -0
- checkpoint-400/optimizer.pt +3 -0
- checkpoint-400/rng_state_0.pth +3 -0
- checkpoint-400/rng_state_1.pth +3 -0
- checkpoint-400/rng_state_2.pth +3 -0
- checkpoint-400/rng_state_3.pth +3 -0
- checkpoint-400/scheduler.pt +3 -0
README.md
ADDED
@@ -0,0 +1,20 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: float16
+### Framework versions
+
+
+- PEFT 0.4.0
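For reference, the quantization settings listed in this README map one-to-one onto a `BitsAndBytesConfig`; a minimal sketch, assuming the standard `transformers`/`bitsandbytes` API (the original training script is not part of this commit):

```python
# Sketch: the 4-bit NF4 config described in README.md, reconstructed with
# transformers.BitsAndBytesConfig. Not the repo's own code.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_8bit: False / load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # NF4 4-bit quantization
    bnb_4bit_use_double_quant=True,        # nested quantization of the scales
    bnb_4bit_compute_dtype=torch.float16,  # matmuls computed in fp16
)
```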
adapter_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "./Llama-2-7b-chat-hf",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
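The config above is a plain PEFT LoRA adapter (r=8 on q_proj/v_proj of a local Llama-2-7b-chat-hf copy). A minimal loading sketch, assuming the PEFT 0.4-era API and that the base model path exists locally:

```python
# Sketch: attach this repo's LoRA adapter to the 4-bit base model.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True, bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.float16,
)
base = AutoModelForCausalLM.from_pretrained(
    "./Llama-2-7b-chat-hf",          # base_model_name_or_path from adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
# "." is the directory holding adapter_config.json and adapter_model.bin
model = PeftModel.from_pretrained(base, ".")
```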
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b93f8b5bf9d5ad5daa6fb35494a535ada9f8370a39766fc243709362f04d76a
+size 16821197
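The 16.8 MB adapter size is consistent with a rank-8 LoRA over q_proj and v_proj stored in fp32; a quick check, assuming the usual Llama-2-7B dimensions (32 layers, hidden size 4096), which are not recorded in this commit:

```python
# Back-of-the-envelope: expected size of adapter_model.bin.
layers, hidden, r = 32, 4096, 8
per_module = 2 * r * hidden      # lora_A (r x h) + lora_B (h x r)
total = layers * 2 * per_module  # q_proj and v_proj in every layer
print(total)                     # 4,194,304 trainable parameters
print(total * 4)                 # ~16.78 MB in fp32, close to the
                                 # 16,821,197-byte file (rest is pickle overhead)
```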
all_results.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "epoch": 1.0,
+  "train_loss": 0.3478317604566875,
+  "train_runtime": 4641.6132,
+  "train_samples_per_second": 13.114,
+  "train_steps_per_second": 0.102
+}
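The run metrics let one back out quantities the files do not state directly; a hedged recomputation (per-device batch size and GPU count are not recorded here):

```python
# Derived figures from all_results.json; small gaps come from rounding
# in the logged metrics.
runtime, samples_ps, steps_ps = 4641.6132, 13.114, 0.102
print(samples_ps / steps_ps)  # ~128.6 -> effective batch of ~128 samples/step
print(steps_ps * runtime)     # ~473.4 -> matches max_steps = 475 in trainer_state.json
print(samples_ps * runtime)   # ~60,870 -> approximate number of training samples
```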
checkpoint-100/README.md
ADDED
@@ -0,0 +1,20 @@
(content identical to the top-level README.md above)
checkpoint-100/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
(content identical to the top-level adapter_config.json above)
checkpoint-100/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd0354676fc0412b2797e2f90d88174804a8c2f0f6c6887a92b0311013533082
+size 16821197
checkpoint-100/finetuning_args.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "dpo_beta": 0.1,
+  "finetuning_type": "lora",
+  "lora_alpha": 32.0,
+  "lora_dropout": 0.1,
+  "lora_rank": 8,
+  "lora_target": [
+    "q_proj",
+    "v_proj"
+  ],
+  "name_module_trainable": "mlp",
+  "num_hidden_layers": 32,
+  "num_layer_trainable": 3,
+  "ppo_score_norm": false,
+  "resume_lora_training": true
+}
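Fields such as dpo_beta, ppo_score_norm, and resume_lora_training match the FinetuningArguments of the LLaMA-Efficient-Tuning (LLaMA-Factory) project, which this run appears to have used; that is an inference, not something the commit states. The LoRA fields should mirror the PEFT config, which a short sketch can verify:

```python
# Sketch: cross-check finetuning_args.json against adapter_config.json.
import json

with open("checkpoint-100/finetuning_args.json") as f:
    ft = json.load(f)
with open("checkpoint-100/adapter_config.json") as f:
    peft_cfg = json.load(f)

assert ft["lora_rank"] == peft_cfg["r"]                  # 8
assert ft["lora_alpha"] == peft_cfg["lora_alpha"]        # 32.0
assert ft["lora_dropout"] == peft_cfg["lora_dropout"]    # 0.1
assert sorted(ft["lora_target"]) == sorted(peft_cfg["target_modules"])
```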
checkpoint-100/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9fa05b8eb370479a3502e3e9a63152954d7417b77da798fb18905d3fe51b528
+size 33661637
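The optimizer state is almost exactly twice the adapter file, which is what AdamW's two fp32 moment buffers over the ~4.2M LoRA parameters would occupy (AdamW is the Trainer default; the actual optimizer choice is not recorded here):

```python
# Back-of-the-envelope: expected size of optimizer.pt.
lora_params = 4_194_304     # from the adapter-size check above
print(2 * lora_params * 4)  # 33,554,432 bytes for exp_avg + exp_avg_sq,
                            # close to the 33,661,637-byte file
```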
checkpoint-100/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:685ab4b3038810680f88b1d59df769ea68e7d862434955abae0ca497f13e8afe
+size 18679
checkpoint-100/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87cf45370820610b9f28e800b706764c94c0ff8527bf390c3c5f9fff93afde7e
+size 18679
checkpoint-100/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ebf9800688c38adccc12fa6858378c50e1f7d26a2e5570e8b7e04dff1e70262
+size 18679
checkpoint-100/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2bb7e4e4e8915c52d5b3b47831e6e2e038319baed3305fb634f6fab3b999133
+size 18679
checkpoint-100/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:208fa747ba957a9f6589a9eb1cf3905edf393236e5e918f882719b448f1edfab
+size 627
checkpoint-100/trainer_state.json
ADDED
@@ -0,0 +1,76 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.21019442984760903,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.02, "learning_rate": 4.994534068046937e-05, "loss": 1.8599, "step": 10},
+    {"epoch": 0.04, "learning_rate": 4.980286753286195e-05, "loss": 1.6162, "step": 20},
+    {"epoch": 0.06, "learning_rate": 4.965903258506806e-05, "loss": 1.5187, "step": 30},
+    {"epoch": 0.08, "learning_rate": 4.95415555523494e-05, "loss": 1.4685, "step": 40},
+    {"epoch": 0.11, "learning_rate": 4.929469844930753e-05, "loss": 1.3233, "step": 50},
+    {"epoch": 0.13, "learning_rate": 4.885188413200075e-05, "loss": 1.1081, "step": 60},
+    {"epoch": 0.15, "learning_rate": 4.830477159419966e-05, "loss": 0.9364, "step": 70},
+    {"epoch": 0.17, "learning_rate": 4.7655753219826114e-05, "loss": 0.7434, "step": 80},
+    {"epoch": 0.19, "learning_rate": 4.690766700109659e-05, "loss": 0.5053, "step": 90},
+    {"epoch": 0.21, "learning_rate": 4.6063784128704367e-05, "loss": 0.2824, "step": 100}
+  ],
+  "max_steps": 475,
+  "num_train_epochs": 1,
+  "total_flos": 1.906252514852864e+17,
+  "trial_name": null,
+  "trial_params": null
+}
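log_history holds the loss curve directly; a minimal extraction sketch (path assumed as in this checkpoint):

```python
# Sketch: pull (step, loss) pairs out of a trainer_state.json log.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)
pairs = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(pairs)  # [(10, 1.8599), (20, 1.6162), ..., (100, 0.2824)]
```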
checkpoint-100/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0474fbb331a6a8df843475fa87e752177bc451d593d9d453141d5ff0e7d077c9
+size 3230
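Because each checkpoint directory stores the optimizer, scheduler, and one RNG state per process (the four rng_state files imply four training processes), a run can be resumed exactly. With the standard Trainer API that is one call; a sketch, assuming a `trainer` rebuilt with the same model and arguments as the original run:

```python
# Sketch: resume training from this checkpoint; optimizer.pt, scheduler.pt,
# rng_state_{0..3}.pth and trainer_state.json are all restored.
trainer.train(resume_from_checkpoint="checkpoint-100")
```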
checkpoint-200/README.md
ADDED
@@ -0,0 +1,20 @@
(content identical to the top-level README.md above)
checkpoint-200/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
(content identical to the top-level adapter_config.json above)
checkpoint-200/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9589e68864de41dda9c6565f54ffba12fb3e507b01057735af691bd61816d7e9
+size 16821197
checkpoint-200/finetuning_args.json
ADDED
@@ -0,0 +1,16 @@
(content identical to checkpoint-100/finetuning_args.json above)
checkpoint-200/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:689530c5ed411fab4750761eba5dc1d9ece79c8c9ee5ab1cb62763a15c6460a7
+size 33661637
checkpoint-200/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:54ec3a97f0cae79cfabb9169aa5be59177f8e0bfe81f0e4c20223ae5b71d130f
+size 18679
checkpoint-200/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df4add78733002391aed792a12c30bd240f199d144e8cf68bb14a04f4616cb1c
+size 18679
checkpoint-200/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9349eae2e3e94545c210ae5d4b2a31c0da779a93963c62394c9a3627d608d546
+size 18679
checkpoint-200/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bac5028c8e4f410f0ef77e3417211d34afd069e3195852c295664fb9ab37a77f
+size 18679
checkpoint-200/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a5f015cad4cd83d66afff7dcd01e4844b2647942ed23ffebf4a1059172aed00
+size 627
checkpoint-200/trainer_state.json
ADDED
@@ -0,0 +1,136 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.42038885969521805,
+  "global_step": 200,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.02, "learning_rate": 4.994534068046937e-05, "loss": 1.8599, "step": 10},
+    {"epoch": 0.04, "learning_rate": 4.980286753286195e-05, "loss": 1.6162, "step": 20},
+    {"epoch": 0.06, "learning_rate": 4.965903258506806e-05, "loss": 1.5187, "step": 30},
+    {"epoch": 0.08, "learning_rate": 4.95415555523494e-05, "loss": 1.4685, "step": 40},
+    {"epoch": 0.11, "learning_rate": 4.929469844930753e-05, "loss": 1.3233, "step": 50},
+    {"epoch": 0.13, "learning_rate": 4.885188413200075e-05, "loss": 1.1081, "step": 60},
+    {"epoch": 0.15, "learning_rate": 4.830477159419966e-05, "loss": 0.9364, "step": 70},
+    {"epoch": 0.17, "learning_rate": 4.7655753219826114e-05, "loss": 0.7434, "step": 80},
+    {"epoch": 0.19, "learning_rate": 4.690766700109659e-05, "loss": 0.5053, "step": 90},
+    {"epoch": 0.21, "learning_rate": 4.6063784128704367e-05, "loss": 0.2824, "step": 100},
+    {"epoch": 0.23, "learning_rate": 4.512779468773494e-05, "loss": 0.1312, "step": 110},
+    {"epoch": 0.25, "learning_rate": 4.4103791521862784e-05, "loss": 0.0681, "step": 120},
+    {"epoch": 0.27, "learning_rate": 4.2996252336387414e-05, "loss": 0.0496, "step": 130},
+    {"epoch": 0.29, "learning_rate": 4.181002011836737e-05, "loss": 0.0482, "step": 140},
+    {"epoch": 0.32, "learning_rate": 4.0550281959470023e-05, "loss": 0.0497, "step": 150},
+    {"epoch": 0.34, "learning_rate": 3.9222546374139533e-05, "loss": 0.0523, "step": 160},
+    {"epoch": 0.36, "learning_rate": 3.783261921226479e-05, "loss": 0.0561, "step": 170},
+    {"epoch": 0.38, "learning_rate": 3.6386578271674984e-05, "loss": 0.0606, "step": 180},
+    {"epoch": 0.4, "learning_rate": 3.489074672147621e-05, "loss": 0.0664, "step": 190},
+    {"epoch": 0.42, "learning_rate": 3.335166545244178e-05, "loss": 0.0742, "step": 200}
+  ],
+  "max_steps": 475,
+  "num_train_epochs": 1,
+  "total_flos": 3.8128239525022925e+17,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-200/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0474fbb331a6a8df843475fa87e752177bc451d593d9d453141d5ff0e7d077c9
+size 3230
checkpoint-300/README.md
ADDED
@@ -0,0 +1,20 @@
(content identical to the top-level README.md above)
checkpoint-300/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
(content identical to the top-level adapter_config.json above)
checkpoint-300/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ffe75cda42fc1f1bf8f72a47ff9c7c2eeab14b9cd3987fa6b00c4f75cf623fd
+size 16821197
checkpoint-300/finetuning_args.json
ADDED
@@ -0,0 +1,16 @@
(content identical to checkpoint-100/finetuning_args.json above)
checkpoint-300/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec7bf627d9efb82fc4d96784a0bcb7e8a249b9e768b2678c9aaec52021a38cec
+size 33661637
checkpoint-300/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c89e638ed7375ec974a55e3a3c91729e83049337874afec1eb571fdf63379cf
+size 18679
checkpoint-300/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ee0899f0aa87a4b654957c6791a8ce0592556287ba9a873cd50e551c39e9322
+size 18679
checkpoint-300/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:903e65ab7c1af4e8289a7598d59aea37a64efca8f1f755fb387a7ee8a9f632c5
+size 18679
checkpoint-300/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e76da0096c2fae55285370594561055d0b90a35ef08c5fdb801ca5eb7bce8ec2
+size 18679
checkpoint-300/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d3a63edbf6cbe2235680152d6107703acd3337683d4cbf948f62e2c6452d486
+size 627
checkpoint-300/trainer_state.json
ADDED
@@ -0,0 +1,196 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.6305832895428272,
+  "global_step": 300,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.02, "learning_rate": 4.994534068046937e-05, "loss": 1.8599, "step": 10},
+    {"epoch": 0.04, "learning_rate": 4.980286753286195e-05, "loss": 1.6162, "step": 20},
+    {"epoch": 0.06, "learning_rate": 4.965903258506806e-05, "loss": 1.5187, "step": 30},
+    {"epoch": 0.08, "learning_rate": 4.95415555523494e-05, "loss": 1.4685, "step": 40},
+    {"epoch": 0.11, "learning_rate": 4.929469844930753e-05, "loss": 1.3233, "step": 50},
+    {"epoch": 0.13, "learning_rate": 4.885188413200075e-05, "loss": 1.1081, "step": 60},
+    {"epoch": 0.15, "learning_rate": 4.830477159419966e-05, "loss": 0.9364, "step": 70},
+    {"epoch": 0.17, "learning_rate": 4.7655753219826114e-05, "loss": 0.7434, "step": 80},
+    {"epoch": 0.19, "learning_rate": 4.690766700109659e-05, "loss": 0.5053, "step": 90},
+    {"epoch": 0.21, "learning_rate": 4.6063784128704367e-05, "loss": 0.2824, "step": 100},
+    {"epoch": 0.23, "learning_rate": 4.512779468773494e-05, "loss": 0.1312, "step": 110},
+    {"epoch": 0.25, "learning_rate": 4.4103791521862784e-05, "loss": 0.0681, "step": 120},
+    {"epoch": 0.27, "learning_rate": 4.2996252336387414e-05, "loss": 0.0496, "step": 130},
+    {"epoch": 0.29, "learning_rate": 4.181002011836737e-05, "loss": 0.0482, "step": 140},
+    {"epoch": 0.32, "learning_rate": 4.0550281959470023e-05, "loss": 0.0497, "step": 150},
+    {"epoch": 0.34, "learning_rate": 3.9222546374139533e-05, "loss": 0.0523, "step": 160},
+    {"epoch": 0.36, "learning_rate": 3.783261921226479e-05, "loss": 0.0561, "step": 170},
+    {"epoch": 0.38, "learning_rate": 3.6386578271674984e-05, "loss": 0.0606, "step": 180},
+    {"epoch": 0.4, "learning_rate": 3.489074672147621e-05, "loss": 0.0664, "step": 190},
+    {"epoch": 0.42, "learning_rate": 3.335166545244178e-05, "loss": 0.0742, "step": 200},
+    {"epoch": 0.44, "learning_rate": 3.1776064475361114e-05, "loss": 0.0799, "step": 210},
+    {"epoch": 0.46, "learning_rate": 3.017083349241492e-05, "loss": 0.0873, "step": 220},
+    {"epoch": 0.48, "learning_rate": 2.8542991770260608e-05, "loss": 0.0952, "step": 230},
+    {"epoch": 0.5, "learning_rate": 2.689965744656508e-05, "loss": 0.1036, "step": 240},
+    {"epoch": 0.53, "learning_rate": 2.5248016404199908e-05, "loss": 0.1103, "step": 250},
+    {"epoch": 0.55, "learning_rate": 2.3595290849203862e-05, "loss": 0.1207, "step": 260},
+    {"epoch": 0.57, "learning_rate": 2.19487077299135e-05, "loss": 0.1254, "step": 270},
+    {"epoch": 0.59, "learning_rate": 2.031546713535688e-05, "loss": 0.1387, "step": 280},
+    {"epoch": 0.61, "learning_rate": 1.87027108110963e-05, "loss": 0.1402, "step": 290},
+    {"epoch": 0.63, "learning_rate": 1.7117490930191965e-05, "loss": 0.1518, "step": 300}
+  ],
+  "max_steps": 475,
+  "num_train_epochs": 1,
+  "total_flos": 5.7197792313789645e+17,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-300/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0474fbb331a6a8df843475fa87e752177bc451d593d9d453141d5ff0e7d077c9
+size 3230
checkpoint-400/README.md
ADDED
@@ -0,0 +1,20 @@
(content identical to the top-level README.md above)
checkpoint-400/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
(content identical to the top-level adapter_config.json above)
checkpoint-400/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81dea3d855b2b268a97491ab9a1f036d167f3d7b363ac10a851ebc51bff2b973
+size 16821197
checkpoint-400/finetuning_args.json
ADDED
@@ -0,0 +1,16 @@
(content identical to checkpoint-100/finetuning_args.json above)
checkpoint-400/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc502017347738b7045a6d970b4f246523c4efb9d497c845fb2c0c7ab15a2c0b
+size 33661637
checkpoint-400/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3071bdb3d8008d67ade26df133a9de77e9c5511a68bdf006986873b194d0729c
+size 18679
checkpoint-400/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4d04dc549b97a70c877112b4ea697c076d6467a073e38aab23a469999e8d895
+size 18679
checkpoint-400/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dbedb16f08f5b1ebbf7626b3dc8d974a9f361b4fc37857836a2365dfb2860db
+size 18679
checkpoint-400/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68913ab66483a1233fba4fda9d86e1328db1962b0d2dcc0be4a9ca8a0498758f
+size 18679
checkpoint-400/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3336dcfe269fac13b4f189bb47e77778dcb14b26aa3143efeba85141081ece94
+size 627