woodwardmw committed
Commit f1ae877 · verified · 1 Parent(s): b76d723

Training in progress, step 1600

adapter_config.json ADDED
@@ -0,0 +1,166 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "jbochi/madlad400-3b-mt",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "shared",
+     "embed_tokens",
+     "lm_head",
+     "encoder.block.0.layer.0.layer_norm",
+     "encoder.block.0.layer.1.layer_norm",
+     "encoder.block.1.layer.0.layer_norm",
+     "encoder.block.1.layer.1.layer_norm",
+     "encoder.block.2.layer.0.layer_norm",
+     "encoder.block.2.layer.1.layer_norm",
+     "encoder.block.3.layer.0.layer_norm",
+     "encoder.block.3.layer.1.layer_norm",
+     "encoder.block.4.layer.0.layer_norm",
+     "encoder.block.4.layer.1.layer_norm",
+     "encoder.block.5.layer.0.layer_norm",
+     "encoder.block.5.layer.1.layer_norm",
+     "encoder.block.6.layer.0.layer_norm",
+     "encoder.block.6.layer.1.layer_norm",
+     "encoder.block.7.layer.0.layer_norm",
+     "encoder.block.7.layer.1.layer_norm",
+     "encoder.block.8.layer.0.layer_norm",
+     "encoder.block.8.layer.1.layer_norm",
+     "encoder.block.9.layer.0.layer_norm",
+     "encoder.block.9.layer.1.layer_norm",
+     "encoder.block.10.layer.0.layer_norm",
+     "encoder.block.10.layer.1.layer_norm",
+     "encoder.block.11.layer.0.layer_norm",
+     "encoder.block.11.layer.1.layer_norm",
+     "encoder.block.12.layer.0.layer_norm",
+     "encoder.block.12.layer.1.layer_norm",
+     "encoder.block.13.layer.0.layer_norm",
+     "encoder.block.13.layer.1.layer_norm",
+     "encoder.block.14.layer.0.layer_norm",
+     "encoder.block.14.layer.1.layer_norm",
+     "encoder.block.15.layer.0.layer_norm",
+     "encoder.block.15.layer.1.layer_norm",
+     "encoder.block.16.layer.0.layer_norm",
+     "encoder.block.16.layer.1.layer_norm",
+     "encoder.block.17.layer.0.layer_norm",
+     "encoder.block.17.layer.1.layer_norm",
+     "encoder.block.18.layer.0.layer_norm",
+     "encoder.block.18.layer.1.layer_norm",
+     "encoder.block.19.layer.0.layer_norm",
+     "encoder.block.19.layer.1.layer_norm",
+     "encoder.block.20.layer.0.layer_norm",
+     "encoder.block.20.layer.1.layer_norm",
+     "encoder.block.21.layer.0.layer_norm",
+     "encoder.block.21.layer.1.layer_norm",
+     "encoder.block.22.layer.0.layer_norm",
+     "encoder.block.22.layer.1.layer_norm",
+     "encoder.block.23.layer.0.layer_norm",
+     "encoder.block.23.layer.1.layer_norm",
+     "encoder.block.24.layer.0.layer_norm",
+     "encoder.block.24.layer.1.layer_norm",
+     "encoder.block.25.layer.0.layer_norm",
+     "encoder.block.25.layer.1.layer_norm",
+     "encoder.block.26.layer.0.layer_norm",
+     "encoder.block.26.layer.1.layer_norm",
+     "encoder.block.27.layer.0.layer_norm",
+     "encoder.block.27.layer.1.layer_norm",
+     "encoder.block.28.layer.0.layer_norm",
+     "encoder.block.28.layer.1.layer_norm",
+     "encoder.block.29.layer.0.layer_norm",
+     "encoder.block.29.layer.1.layer_norm",
+     "encoder.block.30.layer.0.layer_norm",
+     "encoder.block.30.layer.1.layer_norm",
+     "encoder.block.31.layer.0.layer_norm",
+     "encoder.block.31.layer.1.layer_norm",
+     "decoder.block.0.layer.0.layer_norm",
+     "decoder.block.0.layer.1.layer_norm",
+     "decoder.block.1.layer.0.layer_norm",
+     "decoder.block.1.layer.1.layer_norm",
+     "decoder.block.2.layer.0.layer_norm",
+     "decoder.block.2.layer.1.layer_norm",
+     "decoder.block.3.layer.0.layer_norm",
+     "decoder.block.3.layer.1.layer_norm",
+     "decoder.block.4.layer.0.layer_norm",
+     "decoder.block.4.layer.1.layer_norm",
+     "decoder.block.5.layer.0.layer_norm",
+     "decoder.block.5.layer.1.layer_norm",
+     "decoder.block.6.layer.0.layer_norm",
+     "decoder.block.6.layer.1.layer_norm",
+     "decoder.block.7.layer.0.layer_norm",
+     "decoder.block.7.layer.1.layer_norm",
+     "decoder.block.8.layer.0.layer_norm",
+     "decoder.block.8.layer.1.layer_norm",
+     "decoder.block.9.layer.0.layer_norm",
+     "decoder.block.9.layer.1.layer_norm",
+     "decoder.block.10.layer.0.layer_norm",
+     "decoder.block.10.layer.1.layer_norm",
+     "decoder.block.11.layer.0.layer_norm",
+     "decoder.block.11.layer.1.layer_norm",
+     "decoder.block.12.layer.0.layer_norm",
+     "decoder.block.12.layer.1.layer_norm",
+     "decoder.block.13.layer.0.layer_norm",
+     "decoder.block.13.layer.1.layer_norm",
+     "decoder.block.14.layer.0.layer_norm",
+     "decoder.block.14.layer.1.layer_norm",
+     "decoder.block.15.layer.0.layer_norm",
+     "decoder.block.15.layer.1.layer_norm",
+     "decoder.block.16.layer.0.layer_norm",
+     "decoder.block.16.layer.1.layer_norm",
+     "decoder.block.17.layer.0.layer_norm",
+     "decoder.block.17.layer.1.layer_norm",
+     "decoder.block.18.layer.0.layer_norm",
+     "decoder.block.18.layer.1.layer_norm",
+     "decoder.block.19.layer.0.layer_norm",
+     "decoder.block.19.layer.1.layer_norm",
+     "decoder.block.20.layer.0.layer_norm",
+     "decoder.block.20.layer.1.layer_norm",
+     "decoder.block.21.layer.0.layer_norm",
+     "decoder.block.21.layer.1.layer_norm",
+     "decoder.block.22.layer.0.layer_norm",
+     "decoder.block.22.layer.1.layer_norm",
+     "decoder.block.23.layer.0.layer_norm",
+     "decoder.block.23.layer.1.layer_norm",
+     "decoder.block.24.layer.0.layer_norm",
+     "decoder.block.24.layer.1.layer_norm",
+     "decoder.block.25.layer.0.layer_norm",
+     "decoder.block.25.layer.1.layer_norm",
+     "decoder.block.26.layer.0.layer_norm",
+     "decoder.block.26.layer.1.layer_norm",
+     "decoder.block.27.layer.0.layer_norm",
+     "decoder.block.27.layer.1.layer_norm",
+     "decoder.block.28.layer.0.layer_norm",
+     "decoder.block.28.layer.1.layer_norm",
+     "decoder.block.29.layer.0.layer_norm",
+     "decoder.block.29.layer.1.layer_norm",
+     "decoder.block.30.layer.0.layer_norm",
+     "decoder.block.30.layer.1.layer_norm",
+     "decoder.block.31.layer.0.layer_norm",
+     "decoder.block.31.layer.1.layer_norm"
+   ],
+   "peft_type": "LORA",
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o",
+     "wi_0",
+     "v",
+     "q",
+     "k",
+     "wi_1",
+     "wo"
+   ],
+   "task_type": "SEQ_2_SEQ_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
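
The config above describes a LoRA adapter (r=32, lora_alpha=128) over jbochi/madlad400-3b-mt: the T5 attention and feed-forward projections (q, k, v, o, wi_0, wi_1, wo) receive low-rank updates, while the shared embeddings, lm_head, and every layer norm are fully fine-tuned via modules_to_save. A minimal sketch of loading such an adapter with PEFT follows; the adapter path is a placeholder, and the <2en> prefix assumes MADLAD-400's target-language-token convention.

    # Minimal sketch: attaching this adapter to the base model with PEFT.
    # "path/to/adapter" is a placeholder for wherever adapter_config.json
    # and adapter_model.safetensors live locally or on the Hub.
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
    from peft import PeftModel

    base = AutoModelForSeq2SeqLM.from_pretrained("jbochi/madlad400-3b-mt")
    tokenizer = AutoTokenizer.from_pretrained("jbochi/madlad400-3b-mt")

    # PeftModel reads the adapter config and weights from the given path.
    model = PeftModel.from_pretrained(base, "path/to/adapter")

    # MADLAD-400 selects the target language with a <2xx> prefix token.
    inputs = tokenizer("<2en> Bonjour le monde", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Because modules_to_save stores full copies of the embeddings and lm_head alongside the LoRA matrices, the adapter checkpoint is large, which matches the ~8.8 GB safetensors pointer below.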
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f126863dc23f55c3a5530474488118a0bbf746420f8721a019640dd07a80ded8
+ size 8807602104
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55c0face0557cadf3b20853781c0c70f0f336a07c597a160180bc3b9fc4d624d
+ size 5432
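
Both binary files are committed as Git LFS pointers (spec version, sha256 oid, byte size) rather than inline blobs. A minimal sketch of checking a locally pulled object against its pointer; the filename assumes this repo's own layout and that the real binary (e.g. via `git lfs pull`) has replaced the pointer.

    # Minimal sketch: verify a downloaded LFS object against its pointer's oid.
    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        """Hash the file in chunks so multi-GB objects need not fit in memory."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # oid taken from the adapter_model.safetensors pointer above.
    expected = "f126863dc23f55c3a5530474488118a0bbf746420f8721a019640dd07a80ded8"
    assert sha256_of("adapter_model.safetensors") == expected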