Commit 7634146 (verified) · Isotonic committed · 1 parent: 4da5155

End of training

README.md CHANGED
@@ -3,9 +3,23 @@ license: apache-2.0
 base_model: google/t5-small-lm-adapt
 tags:
 - generated_from_trainer
+datasets:
+- Isotonic/planner_dataset
+metrics:
+- rouge
 model-index:
 - name: plan_t5
-  results: []
+  results:
+  - task:
+      name: Summarization
+      type: summarization
+    dataset:
+      name: Isotonic/planner_dataset
+      type: Isotonic/planner_dataset
+    metrics:
+    - name: Rouge1
+      type: rouge
+      value: 58.1228
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -13,7 +27,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 # plan_t5
 
-This model is a fine-tuned version of [google/t5-small-lm-adapt](https://huggingface.co/google/t5-small-lm-adapt) on an unknown dataset.
+This model is a fine-tuned version of [google/t5-small-lm-adapt](https://huggingface.co/google/t5-small-lm-adapt) on the Isotonic/planner_dataset dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.4366
+- Rouge1: 58.1228
+- Rouge2: 24.3461
+- Rougel: 58.1313
+- Rougelsum: 58.1335
+- Gen Len: 7.9747
 
 ## Model description
 
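The updated card tags the checkpoint for summarization on Isotonic/planner_dataset. As a rough inference sketch (the repo id `Isotonic/plan_t5` and the example input are assumptions, not stated in this diff), the checkpoint can be loaded with the standard transformers seq2seq API:

```python
# Minimal inference sketch; "Isotonic/plan_t5" is a hypothetical repo id inferred from the model name.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "Isotonic/plan_t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

text = "Book a table for two at an Italian restaurant tomorrow evening."  # placeholder input
inputs = tokenizer(text, return_tensors="pt")
# The reported eval gen_len is about 8 tokens, so a small generation budget is enough.
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```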
 
all_results.json CHANGED
@@ -1,18 +1,18 @@
 {
-    "epoch": 1.0,
+    "epoch": 5.0,
     "eval_gen_len": 7.9747384536871655,
-    "eval_loss": 1.4443535804748535,
-    "eval_rouge1": 58.1207,
-    "eval_rouge2": 24.3398,
-    "eval_rougeL": 58.1271,
-    "eval_rougeLsum": 58.1292,
-    "eval_runtime": 237.0503,
+    "eval_loss": 1.4365952014923096,
+    "eval_rouge1": 58.1228,
+    "eval_rouge2": 24.3461,
+    "eval_rougeL": 58.1313,
+    "eval_rougeLsum": 58.1335,
+    "eval_runtime": 236.7853,
     "eval_samples": 7838,
-    "eval_samples_per_second": 33.065,
-    "eval_steps_per_second": 4.134,
-    "train_loss": 1.643041113804542,
-    "train_runtime": 148.4875,
+    "eval_samples_per_second": 33.102,
+    "eval_steps_per_second": 4.139,
+    "train_loss": 1.525455296723993,
+    "train_runtime": 771.3163,
     "train_samples": 31349,
-    "train_samples_per_second": 211.122,
-    "train_steps_per_second": 26.393
+    "train_samples_per_second": 203.218,
+    "train_steps_per_second": 25.405
 }
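The throughput figures in the new results follow from the sample counts, epoch count, and runtimes. A quick sanity-check sketch (the relationship is how the Trainer typically derives these fields, assumed here rather than read from this diff):

```python
# Reproduce the reported throughput numbers from the other fields in all_results.json.
train_samples, num_epochs, train_runtime = 31349, 5.0, 771.3163
eval_samples, eval_runtime = 7838, 236.7853

print(round(train_samples * num_epochs / train_runtime, 3))  # ~203.218 (train_samples_per_second)
print(round(eval_samples / eval_runtime, 3))                 # ~33.102 (eval_samples_per_second)
```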
eval_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch": 1.0,
+    "epoch": 5.0,
     "eval_gen_len": 7.9747384536871655,
-    "eval_loss": 1.4443535804748535,
-    "eval_rouge1": 58.1207,
-    "eval_rouge2": 24.3398,
-    "eval_rougeL": 58.1271,
-    "eval_rougeLsum": 58.1292,
-    "eval_runtime": 237.0503,
+    "eval_loss": 1.4365952014923096,
+    "eval_rouge1": 58.1228,
+    "eval_rouge2": 24.3461,
+    "eval_rougeL": 58.1313,
+    "eval_rougeLsum": 58.1335,
+    "eval_runtime": 236.7853,
     "eval_samples": 7838,
-    "eval_samples_per_second": 33.065,
-    "eval_steps_per_second": 4.134
+    "eval_samples_per_second": 33.102,
+    "eval_steps_per_second": 4.139
 }
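The ROUGE values reported here follow the common run_summarization convention of the `evaluate` rouge scores scaled by 100. A toy sketch of computing such scores (placeholder strings, not the real 7838-example evaluation set):

```python
# Toy ROUGE computation; the actual evaluation uses Isotonic/planner_dataset targets.
import evaluate

rouge = evaluate.load("rouge")
predictions = ["plan a weekend trip"]       # placeholder model output
references = ["plan a short weekend trip"]  # placeholder reference
scores = rouge.compute(predictions=predictions, references=references, use_stemmer=True)
print({k: round(v * 100, 4) for k, v in scores.items()})  # rouge1 / rouge2 / rougeL / rougeLsum on a 0-100 scale
```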
generation_config.json CHANGED
@@ -1,5 +1,4 @@
 {
-  "_from_model_config": true,
   "decoder_start_token_id": 0,
   "eos_token_id": 1,
   "pad_token_id": 0,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 1.0,
-    "train_loss": 1.643041113804542,
-    "train_runtime": 148.4875,
+    "epoch": 5.0,
+    "train_loss": 1.525455296723993,
+    "train_runtime": 771.3163,
     "train_samples": 31349,
-    "train_samples_per_second": 211.122,
-    "train_steps_per_second": 26.393
+    "train_samples_per_second": 203.218,
+    "train_steps_per_second": 25.405
 }
trainer_state.json CHANGED
@@ -1,78 +1,302 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.0,
+  "epoch": 5.0,
   "eval_steps": 500,
-  "global_step": 3919,
+  "global_step": 19595,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.13,
-      "grad_norm": 1.9183778762817383,
-      "learning_rate": 3.188775510204082e-05,
-      "loss": 2.8223,
+      "grad_norm": 2.676547050476074,
+      "learning_rate": 6.379178361826997e-06,
+      "loss": 4.29,
       "step": 500
     },
     {
       "epoch": 0.26,
-      "grad_norm": 2.046781539916992,
-      "learning_rate": 4.655502392344498e-05,
-      "loss": 1.5043,
+      "grad_norm": 3.5399041175842285,
+      "learning_rate": 1.2758356723653994e-05,
+      "loss": 1.5873,
       "step": 1000
     },
     {
       "epoch": 0.38,
-      "grad_norm": 1.9903734922409058,
-      "learning_rate": 3.8580542264752795e-05,
-      "loss": 1.4817,
+      "grad_norm": 2.1953482627868652,
+      "learning_rate": 1.913753508548099e-05,
+      "loss": 1.5261,
       "step": 1500
     },
     {
       "epoch": 0.51,
-      "grad_norm": 1.7653305530548096,
-      "learning_rate": 3.060606060606061e-05,
-      "loss": 1.4671,
+      "grad_norm": 2.1363420486450195,
+      "learning_rate": 2.5516713447307987e-05,
+      "loss": 1.4903,
       "step": 2000
     },
     {
       "epoch": 0.64,
-      "grad_norm": 1.9060845375061035,
-      "learning_rate": 2.2631578947368423e-05,
-      "loss": 1.4637,
+      "grad_norm": 2.1203014850616455,
+      "learning_rate": 3.189589180913499e-05,
+      "loss": 1.4803,
       "step": 2500
     },
     {
       "epoch": 0.77,
-      "grad_norm": 1.784713625907898,
-      "learning_rate": 1.4657097288676236e-05,
-      "loss": 1.4588,
+      "grad_norm": 1.9569646120071411,
+      "learning_rate": 3.827507017096198e-05,
+      "loss": 1.4694,
       "step": 3000
     },
     {
       "epoch": 0.89,
-      "grad_norm": 1.737605094909668,
-      "learning_rate": 6.6826156299840516e-06,
-      "loss": 1.4615,
+      "grad_norm": 1.813926100730896,
+      "learning_rate": 4.465424853278898e-05,
+      "loss": 1.4698,
       "step": 3500
     },
     {
-      "epoch": 1.0,
-      "step": 3919,
-      "total_flos": 102290970089472.0,
-      "train_loss": 1.643041113804542,
-      "train_runtime": 148.4875,
-      "train_samples_per_second": 211.122,
-      "train_steps_per_second": 26.393
+      "epoch": 1.02,
+      "grad_norm": 1.609632968902588,
+      "learning_rate": 4.974164327634601e-05,
+      "loss": 1.4607,
+      "step": 4000
+    },
+    {
+      "epoch": 1.15,
+      "grad_norm": 1.7478517293930054,
+      "learning_rate": 4.814684868588926e-05,
+      "loss": 1.4577,
+      "step": 4500
+    },
+    {
+      "epoch": 1.28,
+      "grad_norm": 1.985445261001587,
+      "learning_rate": 4.655205409543251e-05,
+      "loss": 1.454,
+      "step": 5000
+    },
+    {
+      "epoch": 1.4,
+      "grad_norm": 1.48637855052948,
+      "learning_rate": 4.495725950497576e-05,
+      "loss": 1.4566,
+      "step": 5500
+    },
+    {
+      "epoch": 1.53,
+      "grad_norm": 1.9161055088043213,
+      "learning_rate": 4.3362464914519015e-05,
+      "loss": 1.4513,
+      "step": 6000
+    },
+    {
+      "epoch": 1.66,
+      "grad_norm": 1.8389859199523926,
+      "learning_rate": 4.1767670324062264e-05,
+      "loss": 1.4545,
+      "step": 6500
+    },
+    {
+      "epoch": 1.79,
+      "grad_norm": 1.7734977006912231,
+      "learning_rate": 4.017287573360551e-05,
+      "loss": 1.4484,
+      "step": 7000
+    },
+    {
+      "epoch": 1.91,
+      "grad_norm": 2.1142358779907227,
+      "learning_rate": 3.857808114314876e-05,
+      "loss": 1.447,
+      "step": 7500
+    },
+    {
+      "epoch": 2.04,
+      "grad_norm": 1.546547532081604,
+      "learning_rate": 3.698328655269202e-05,
+      "loss": 1.4457,
+      "step": 8000
+    },
+    {
+      "epoch": 2.17,
+      "grad_norm": 1.6596064567565918,
+      "learning_rate": 3.5388491962235266e-05,
+      "loss": 1.4474,
+      "step": 8500
+    },
+    {
+      "epoch": 2.3,
+      "grad_norm": 1.6050539016723633,
+      "learning_rate": 3.3793697371778515e-05,
+      "loss": 1.4426,
+      "step": 9000
+    },
+    {
+      "epoch": 2.42,
+      "grad_norm": 1.8917399644851685,
+      "learning_rate": 3.2198902781321764e-05,
+      "loss": 1.445,
+      "step": 9500
+    },
+    {
+      "epoch": 2.55,
+      "grad_norm": 1.8012959957122803,
+      "learning_rate": 3.060410819086501e-05,
+      "loss": 1.4465,
+      "step": 10000
+    },
+    {
+      "epoch": 2.68,
+      "grad_norm": 1.4171031713485718,
+      "learning_rate": 2.9009313600408265e-05,
+      "loss": 1.4439,
+      "step": 10500
+    },
+    {
+      "epoch": 2.81,
+      "grad_norm": 1.4994091987609863,
+      "learning_rate": 2.7414519009951518e-05,
+      "loss": 1.4443,
+      "step": 11000
+    },
+    {
+      "epoch": 2.93,
+      "grad_norm": 1.6185240745544434,
+      "learning_rate": 2.5819724419494773e-05,
+      "loss": 1.4392,
+      "step": 11500
+    },
+    {
+      "epoch": 3.06,
+      "grad_norm": 1.600825548171997,
+      "learning_rate": 2.4224929829038022e-05,
+      "loss": 1.4394,
+      "step": 12000
+    },
+    {
+      "epoch": 3.19,
+      "grad_norm": 1.565808892250061,
+      "learning_rate": 2.263013523858127e-05,
+      "loss": 1.4421,
+      "step": 12500
+    },
+    {
+      "epoch": 3.32,
+      "grad_norm": 1.489320993423462,
+      "learning_rate": 2.103534064812452e-05,
+      "loss": 1.4404,
+      "step": 13000
+    },
+    {
+      "epoch": 3.44,
+      "grad_norm": 1.6581943035125732,
+      "learning_rate": 1.9440546057667776e-05,
+      "loss": 1.4378,
+      "step": 13500
+    },
+    {
+      "epoch": 3.57,
+      "grad_norm": 1.7949219942092896,
+      "learning_rate": 1.7845751467211024e-05,
+      "loss": 1.4421,
+      "step": 14000
+    },
+    {
+      "epoch": 3.7,
+      "grad_norm": 1.58501136302948,
+      "learning_rate": 1.6250956876754277e-05,
+      "loss": 1.4358,
+      "step": 14500
+    },
+    {
+      "epoch": 3.83,
+      "grad_norm": 1.6215447187423706,
+      "learning_rate": 1.4656162286297526e-05,
+      "loss": 1.4411,
+      "step": 15000
+    },
+    {
+      "epoch": 3.96,
+      "grad_norm": 1.8111215829849243,
+      "learning_rate": 1.3061367695840776e-05,
+      "loss": 1.4432,
+      "step": 15500
+    },
+    {
+      "epoch": 4.08,
+      "grad_norm": 1.7637990713119507,
+      "learning_rate": 1.1466573105384027e-05,
+      "loss": 1.435,
+      "step": 16000
+    },
+    {
+      "epoch": 4.21,
+      "grad_norm": 1.736876130104065,
+      "learning_rate": 9.871778514927279e-06,
+      "loss": 1.4429,
+      "step": 16500
+    },
+    {
+      "epoch": 4.34,
+      "grad_norm": 1.7990552186965942,
+      "learning_rate": 8.276983924470528e-06,
+      "loss": 1.4312,
+      "step": 17000
+    },
+    {
+      "epoch": 4.47,
+      "grad_norm": 1.466833233833313,
+      "learning_rate": 6.682189334013779e-06,
+      "loss": 1.4352,
+      "step": 17500
+    },
+    {
+      "epoch": 4.59,
+      "grad_norm": 2.320195198059082,
+      "learning_rate": 5.08739474355703e-06,
+      "loss": 1.4368,
+      "step": 18000
+    },
+    {
+      "epoch": 4.72,
+      "grad_norm": 1.6469424962997437,
+      "learning_rate": 3.492600153100281e-06,
+      "loss": 1.4388,
+      "step": 18500
+    },
+    {
+      "epoch": 4.85,
+      "grad_norm": 1.8656694889068604,
+      "learning_rate": 1.8978055626435315e-06,
+      "loss": 1.4305,
+      "step": 19000
+    },
+    {
+      "epoch": 4.98,
+      "grad_norm": 1.6712565422058105,
+      "learning_rate": 3.0301097218678235e-07,
+      "loss": 1.4416,
+      "step": 19500
+    },
+    {
+      "epoch": 5.0,
+      "step": 19595,
+      "total_flos": 511693022656512.0,
+      "train_loss": 1.525455296723993,
+      "train_runtime": 771.3163,
+      "train_samples_per_second": 203.218,
+      "train_steps_per_second": 25.405
     }
   ],
   "logging_steps": 500,
-  "max_steps": 3919,
+  "max_steps": 19595,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 1,
+  "num_train_epochs": 5,
   "save_steps": 500,
-  "total_flos": 102290970089472.0,
+  "total_flos": 511693022656512.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null