akant committed on
Commit 7f3855c · verified · 1 Parent(s): 64b33c5

Upload folder using huggingface_hub
checkpoint-23600/config.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "sad",
+ "1": "disgust",
+ "2": "angry",
+ "3": "neutral",
+ "4": "fear",
+ "5": "surprise",
+ "6": "happy"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "angry": 2,
+ "disgust": 1,
+ "fear": 4,
+ "happy": 6,
+ "neutral": 3,
+ "sad": 0,
+ "surprise": 5
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1"
+ }
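
A checkpoint saved in this layout can be loaded directly with the transformers classes named in the config. The snippet below is a minimal sketch in Python; the local path "./checkpoint-23600" is an assumption about where this folder has been downloaded, not something recorded in the commit.

    from transformers import ViTForImageClassification, ViTImageProcessor

    # Assumed local copy of the checkpoint folder uploaded in this commit.
    ckpt = "./checkpoint-23600"
    model = ViTForImageClassification.from_pretrained(ckpt)
    processor = ViTImageProcessor.from_pretrained(ckpt)

    # id2label from config.json maps output indices to the seven emotions.
    print(model.config.id2label)
    # {0: 'sad', 1: 'disgust', 2: 'angry', 3: 'neutral', 4: 'fear', 5: 'surprise', 6: 'happy'}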
checkpoint-23600/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33773b4580db1e26a0b690c0fa200cf4c0844e95e4769e234a682b37712bed06
+ size 343239356
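
The pointer above stores only the LFS hash and byte count, but the size is a quick sanity check on the architecture: config.json records torch_dtype float32 (4 bytes per parameter), so the file implies roughly 86M parameters, in line with ViT-Base. A back-of-the-envelope check (this treats the whole file as tensor data and ignores the small safetensors header):

    size_bytes = 343_239_356          # "size" from the LFS pointer above
    bytes_per_param = 4               # float32, per torch_dtype in config.json
    print(size_bytes / bytes_per_param / 1e6)  # ~85.8 million parameters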
checkpoint-23600/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d097154adced990bf2d07490a9a0aacd42dca52018260bb14377aea03f9a585d
+ size 686599173
checkpoint-23600/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "do_convert_rgb": null,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
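
Numerically, this processor resizes to 224x224, rescales 8-bit pixels by 1/255 (the rescale_factor above), then normalizes each channel with mean and std 0.5, which maps inputs into [-1, 1]. A hand-written equivalent of the per-pixel arithmetic, using an arbitrary example value:

    pixel = 200                             # example raw 8-bit channel value
    rescaled = pixel * 0.00392156862745098  # i.e. pixel / 255
    normalized = (rescaled - 0.5) / 0.5     # image_mean and image_std are all 0.5
    print(normalized)                       # ~0.569, inside [-1, 1]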
checkpoint-23600/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d65f7e8c7c84bb8b89542d2d12528733e94ace95771b36dd383042c3be18a87a
+ size 14575
checkpoint-23600/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02b62ac223ffa5c4d25055aa820a995bc16441497fc3731b27ee1a4e9e97ac5e
+ size 627
checkpoint-23600/trainer_state.json ADDED
@@ -0,0 +1,562 @@
+ {
+ "best_metric": 1.0445903539657593,
+ "best_model_checkpoint": "facial_emotions_image_detection/checkpoint-23600",
+ "epoch": 20.0,
+ "eval_steps": 500,
+ "global_step": 23600,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.423728813559322,
+ "grad_norm": 1.6307811737060547,
+ "learning_rate": 2.9541595925297115e-06,
+ "loss": 1.8794,
+ "step": 500
+ },
+ {
+ "epoch": 0.847457627118644,
+ "grad_norm": 2.476940631866455,
+ "learning_rate": 2.903225806451613e-06,
+ "loss": 1.6633,
+ "step": 1000
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.43746523639253077,
+ "eval_loss": 1.7183066606521606,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 222.116,
+ "eval_samples_per_second": 113.319,
+ "eval_steps_per_second": 14.168,
+ "step": 1180
+ },
+ {
+ "epoch": 1.271186440677966,
+ "grad_norm": 2.9950172901153564,
+ "learning_rate": 2.8522920203735145e-06,
+ "loss": 1.5106,
+ "step": 1500
+ },
+ {
+ "epoch": 1.694915254237288,
+ "grad_norm": 4.541733741760254,
+ "learning_rate": 2.801358234295416e-06,
+ "loss": 1.4038,
+ "step": 2000
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.5050059594755661,
+ "eval_loss": 1.5001347064971924,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 223.7558,
+ "eval_samples_per_second": 112.489,
+ "eval_steps_per_second": 14.064,
+ "step": 2360
+ },
+ {
+ "epoch": 2.1186440677966103,
+ "grad_norm": 4.030027389526367,
+ "learning_rate": 2.7504244482173174e-06,
+ "loss": 1.3289,
+ "step": 2500
+ },
+ {
+ "epoch": 2.542372881355932,
+ "grad_norm": 5.033902645111084,
+ "learning_rate": 2.699490662139219e-06,
+ "loss": 1.2744,
+ "step": 3000
+ },
+ {
+ "epoch": 2.9661016949152543,
+ "grad_norm": 4.396991729736328,
+ "learning_rate": 2.648556876061121e-06,
+ "loss": 1.2427,
+ "step": 3500
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.5299562971791816,
+ "eval_loss": 1.364925742149353,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 224.6343,
+ "eval_samples_per_second": 112.049,
+ "eval_steps_per_second": 14.009,
+ "step": 3540
+ },
+ {
+ "epoch": 3.389830508474576,
+ "grad_norm": 4.660696983337402,
+ "learning_rate": 2.597623089983022e-06,
+ "loss": 1.1981,
+ "step": 4000
+ },
+ {
+ "epoch": 3.8135593220338984,
+ "grad_norm": 5.578737258911133,
+ "learning_rate": 2.546689303904924e-06,
+ "loss": 1.1535,
+ "step": 4500
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.5553436630909814,
+ "eval_loss": 1.2839970588684082,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 227.7991,
+ "eval_samples_per_second": 110.492,
+ "eval_steps_per_second": 13.815,
+ "step": 4720
+ },
+ {
+ "epoch": 4.237288135593221,
+ "grad_norm": 6.838766574859619,
+ "learning_rate": 2.495755517826825e-06,
+ "loss": 1.1359,
+ "step": 5000
+ },
+ {
+ "epoch": 4.661016949152542,
+ "grad_norm": 6.198632717132568,
+ "learning_rate": 2.4448217317487268e-06,
+ "loss": 1.1001,
+ "step": 5500
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.5646007151370679,
+ "eval_loss": 1.229695439338684,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 223.7686,
+ "eval_samples_per_second": 112.482,
+ "eval_steps_per_second": 14.064,
+ "step": 5900
+ },
+ {
+ "epoch": 5.084745762711864,
+ "grad_norm": 6.204673767089844,
+ "learning_rate": 2.393887945670628e-06,
+ "loss": 1.0829,
+ "step": 6000
+ },
+ {
+ "epoch": 5.508474576271187,
+ "grad_norm": 5.266815662384033,
+ "learning_rate": 2.3429541595925297e-06,
+ "loss": 1.0661,
+ "step": 6500
+ },
+ {
+ "epoch": 5.932203389830509,
+ "grad_norm": 5.726169109344482,
+ "learning_rate": 2.2920203735144314e-06,
+ "loss": 1.0279,
+ "step": 7000
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.5670242352006357,
+ "eval_loss": 1.203406572341919,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 232.0453,
+ "eval_samples_per_second": 108.47,
+ "eval_steps_per_second": 13.562,
+ "step": 7080
+ },
+ {
+ "epoch": 6.3559322033898304,
+ "grad_norm": 15.006390571594238,
+ "learning_rate": 2.241086587436333e-06,
+ "loss": 1.0108,
+ "step": 7500
+ },
+ {
+ "epoch": 6.779661016949152,
+ "grad_norm": 6.441979885101318,
+ "learning_rate": 2.1901528013582344e-06,
+ "loss": 0.9936,
+ "step": 8000
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.5793802145411204,
+ "eval_loss": 1.1601252555847168,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 240.4411,
+ "eval_samples_per_second": 104.683,
+ "eval_steps_per_second": 13.088,
+ "step": 8260
+ },
+ {
+ "epoch": 7.203389830508475,
+ "grad_norm": 13.402597427368164,
+ "learning_rate": 2.1392190152801356e-06,
+ "loss": 0.9714,
+ "step": 8500
+ },
+ {
+ "epoch": 7.627118644067797,
+ "grad_norm": 7.855940341949463,
+ "learning_rate": 2.0882852292020373e-06,
+ "loss": 0.9641,
+ "step": 9000
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.5829161700437028,
+ "eval_loss": 1.1439958810806274,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 230.8248,
+ "eval_samples_per_second": 109.044,
+ "eval_steps_per_second": 13.634,
+ "step": 9440
+ },
+ {
+ "epoch": 8.05084745762712,
+ "grad_norm": 9.408000946044922,
+ "learning_rate": 2.037351443123939e-06,
+ "loss": 0.955,
+ "step": 9500
+ },
+ {
+ "epoch": 8.474576271186441,
+ "grad_norm": 7.728194236755371,
+ "learning_rate": 1.9864176570458407e-06,
+ "loss": 0.9384,
+ "step": 10000
+ },
+ {
+ "epoch": 8.898305084745763,
+ "grad_norm": 7.551913738250732,
+ "learning_rate": 1.935483870967742e-06,
+ "loss": 0.9203,
+ "step": 10500
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.5870480731029003,
+ "eval_loss": 1.1307237148284912,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 230.0359,
+ "eval_samples_per_second": 109.418,
+ "eval_steps_per_second": 13.68,
+ "step": 10620
+ },
+ {
+ "epoch": 9.322033898305085,
+ "grad_norm": 10.311104774475098,
+ "learning_rate": 1.8845500848896437e-06,
+ "loss": 0.9118,
+ "step": 11000
+ },
+ {
+ "epoch": 9.745762711864407,
+ "grad_norm": 15.17727279663086,
+ "learning_rate": 1.833616298811545e-06,
+ "loss": 0.9054,
+ "step": 11500
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.5841080651569328,
+ "eval_loss": 1.1200737953186035,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 229.8788,
+ "eval_samples_per_second": 109.492,
+ "eval_steps_per_second": 13.69,
+ "step": 11800
+ },
+ {
+ "epoch": 10.169491525423728,
+ "grad_norm": 9.014813423156738,
+ "learning_rate": 1.7826825127334464e-06,
+ "loss": 0.9008,
+ "step": 12000
+ },
+ {
+ "epoch": 10.59322033898305,
+ "grad_norm": 12.12553596496582,
+ "learning_rate": 1.7317487266553481e-06,
+ "loss": 0.8756,
+ "step": 12500
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.5928486293206198,
+ "eval_loss": 1.1042790412902832,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 232.2297,
+ "eval_samples_per_second": 108.384,
+ "eval_steps_per_second": 13.551,
+ "step": 12980
+ },
+ {
+ "epoch": 11.016949152542374,
+ "grad_norm": 6.826832294464111,
+ "learning_rate": 1.6808149405772494e-06,
+ "loss": 0.8752,
+ "step": 13000
+ },
+ {
+ "epoch": 11.440677966101696,
+ "grad_norm": 7.686470985412598,
+ "learning_rate": 1.629881154499151e-06,
+ "loss": 0.8655,
+ "step": 13500
+ },
+ {
+ "epoch": 11.864406779661017,
+ "grad_norm": 5.65968132019043,
+ "learning_rate": 1.5789473684210526e-06,
+ "loss": 0.8577,
+ "step": 14000
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.5943980929678189,
+ "eval_loss": 1.096846103668213,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 231.7997,
+ "eval_samples_per_second": 108.585,
+ "eval_steps_per_second": 13.576,
+ "step": 14160
+ },
+ {
+ "epoch": 12.288135593220339,
+ "grad_norm": 14.164326667785645,
+ "learning_rate": 1.5280135823429543e-06,
+ "loss": 0.8592,
+ "step": 14500
+ },
+ {
+ "epoch": 12.711864406779661,
+ "grad_norm": 12.4461669921875,
+ "learning_rate": 1.4770797962648558e-06,
+ "loss": 0.8458,
+ "step": 15000
+ },
+ {
+ "epoch": 13.0,
+ "eval_accuracy": 0.6005164878823996,
+ "eval_loss": 1.083931565284729,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 229.3297,
+ "eval_samples_per_second": 109.755,
+ "eval_steps_per_second": 13.723,
+ "step": 15340
+ },
+ {
+ "epoch": 13.135593220338983,
+ "grad_norm": 7.7317047119140625,
+ "learning_rate": 1.4261460101867572e-06,
+ "loss": 0.8351,
+ "step": 15500
+ },
+ {
+ "epoch": 13.559322033898304,
+ "grad_norm": 10.227279663085938,
+ "learning_rate": 1.3752122241086587e-06,
+ "loss": 0.819,
+ "step": 16000
+ },
+ {
+ "epoch": 13.983050847457626,
+ "grad_norm": 67.47618103027344,
+ "learning_rate": 1.3242784380305604e-06,
+ "loss": 0.8288,
+ "step": 16500
+ },
+ {
+ "epoch": 14.0,
+ "eval_accuracy": 0.5987286452125546,
+ "eval_loss": 1.081558108329773,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 229.7478,
+ "eval_samples_per_second": 109.555,
+ "eval_steps_per_second": 13.698,
+ "step": 16520
+ },
+ {
+ "epoch": 14.40677966101695,
+ "grad_norm": 7.805768966674805,
+ "learning_rate": 1.273344651952462e-06,
+ "loss": 0.8122,
+ "step": 17000
+ },
+ {
+ "epoch": 14.830508474576272,
+ "grad_norm": 6.83695125579834,
+ "learning_rate": 1.2224108658743634e-06,
+ "loss": 0.8224,
+ "step": 17500
+ },
+ {
+ "epoch": 15.0,
+ "eval_accuracy": 0.6054827175208581,
+ "eval_loss": 1.0741792917251587,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 231.8476,
+ "eval_samples_per_second": 108.563,
+ "eval_steps_per_second": 13.574,
+ "step": 17700
+ },
+ {
+ "epoch": 15.254237288135593,
+ "grad_norm": 5.755763530731201,
+ "learning_rate": 1.1714770797962649e-06,
+ "loss": 0.8031,
+ "step": 18000
+ },
+ {
+ "epoch": 15.677966101694915,
+ "grad_norm": 12.960982322692871,
+ "learning_rate": 1.1205432937181666e-06,
+ "loss": 0.8021,
+ "step": 18500
+ },
+ {
+ "epoch": 16.0,
+ "eval_accuracy": 0.6082638061183949,
+ "eval_loss": 1.0608046054840088,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 230.4772,
+ "eval_samples_per_second": 109.208,
+ "eval_steps_per_second": 13.654,
+ "step": 18880
+ },
+ {
+ "epoch": 16.10169491525424,
+ "grad_norm": 12.1406831741333,
+ "learning_rate": 1.0696095076400678e-06,
+ "loss": 0.7949,
+ "step": 19000
+ },
+ {
+ "epoch": 16.52542372881356,
+ "grad_norm": 6.812028884887695,
+ "learning_rate": 1.0186757215619695e-06,
+ "loss": 0.7859,
+ "step": 19500
+ },
+ {
+ "epoch": 16.949152542372882,
+ "grad_norm": 19.037212371826172,
+ "learning_rate": 9.67741935483871e-07,
+ "loss": 0.7908,
+ "step": 20000
+ },
+ {
+ "epoch": 17.0,
+ "eval_accuracy": 0.6059594755661502,
+ "eval_loss": 1.059761881828308,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 228.7616,
+ "eval_samples_per_second": 110.027,
+ "eval_steps_per_second": 13.757,
+ "step": 20060
+ },
+ {
+ "epoch": 17.372881355932204,
+ "grad_norm": 14.513190269470215,
+ "learning_rate": 9.168081494057725e-07,
+ "loss": 0.7982,
+ "step": 20500
+ },
+ {
+ "epoch": 17.796610169491526,
+ "grad_norm": 10.929338455200195,
+ "learning_rate": 8.658743633276741e-07,
+ "loss": 0.7715,
+ "step": 21000
+ },
+ {
+ "epoch": 18.0,
+ "eval_accuracy": 0.609217322208979,
+ "eval_loss": 1.051367998123169,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 228.3563,
+ "eval_samples_per_second": 110.223,
+ "eval_steps_per_second": 13.781,
+ "step": 21240
+ },
+ {
+ "epoch": 18.220338983050848,
+ "grad_norm": 8.468095779418945,
+ "learning_rate": 8.149405772495756e-07,
+ "loss": 0.7715,
+ "step": 21500
+ },
+ {
+ "epoch": 18.64406779661017,
+ "grad_norm": 10.626036643981934,
+ "learning_rate": 7.640067911714771e-07,
+ "loss": 0.7693,
+ "step": 22000
+ },
+ {
+ "epoch": 19.0,
+ "eval_accuracy": 0.6102502979737783,
+ "eval_loss": 1.0482922792434692,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 226.5389,
+ "eval_samples_per_second": 111.107,
+ "eval_steps_per_second": 13.892,
+ "step": 22420
+ },
+ {
+ "epoch": 19.06779661016949,
+ "grad_norm": 9.89828872680664,
+ "learning_rate": 7.130730050933786e-07,
+ "loss": 0.7658,
+ "step": 22500
+ },
+ {
+ "epoch": 19.491525423728813,
+ "grad_norm": 6.61522912979126,
+ "learning_rate": 6.621392190152802e-07,
+ "loss": 0.7604,
+ "step": 23000
+ },
+ {
+ "epoch": 19.915254237288135,
+ "grad_norm": 7.224503517150879,
+ "learning_rate": 6.112054329371817e-07,
+ "loss": 0.7632,
+ "step": 23500
+ },
+ {
+ "epoch": 20.0,
+ "eval_accuracy": 0.609217322208979,
+ "eval_loss": 1.0445903539657593,
+ "eval_model_preparation_time": 0.0035,
+ "eval_runtime": 228.7363,
+ "eval_samples_per_second": 110.039,
+ "eval_steps_per_second": 13.758,
+ "step": 23600
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 29500,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 25,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.851372376087952e+19,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
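
The numbers in trainer_state.json are internally consistent: 23,600 global steps at epoch 20.0 gives 1,180 steps per epoch (matching the first eval at step 1180), and max_steps of 29,500 is exactly that rate over the configured 25 epochs. With a train batch size of 32, the training set works out to roughly 37,760 images; that last figure is inferred from the logged values, not stored in the file. A quick check:

    global_step, epochs = 23_600, 20
    steps_per_epoch = global_step // epochs   # 1180, matches the epoch-1 eval step
    print(steps_per_epoch * 25)               # 29500 == max_steps
    print(steps_per_epoch * 32)               # ~37760 training images (inferred)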
checkpoint-23600/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f181ea4d6b78b8ebc76916156391c8b0e76c46c4d10c4f2ecf0d9c23a8f4912a
+ size 4859
config.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "sad",
+ "1": "disgust",
+ "2": "angry",
+ "3": "neutral",
+ "4": "fear",
+ "5": "surprise",
+ "6": "happy"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "angry": 2,
+ "disgust": 1,
+ "fear": 4,
+ "happy": 6,
+ "neutral": 3,
+ "sad": 0,
+ "surprise": 5
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.47.1"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33773b4580db1e26a0b690c0fa200cf4c0844e95e4769e234a682b37712bed06
+ size 343239356
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+ "do_convert_rgb": null,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f181ea4d6b78b8ebc76916156391c8b0e76c46c4d10c4f2ecf0d9c23a8f4912a
+ size 4859