Jacques7103 committed
Commit 83bd136 · 1 Parent(s): f620103

🍻 cheers

README.md CHANGED
@@ -2,6 +2,7 @@
  license: apache-2.0
  base_model: google/vit-base-patch16-224-in21k
  tags:
+ - image-classification
  - generated_from_trainer
  metrics:
  - accuracy
@@ -15,10 +16,10 @@ should probably proofread and complete it, then remove this comment. -->

  # food-recognition

- This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the indian_food_images dataset.
  It achieves the following results on the evaluation set:
- - Loss: 4.6230
- - Accuracy: 0.4518
+ - Loss: 4.0433
+ - Accuracy: 0.4436

  ## Model description
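For context, a minimal inference sketch for this image-classification checkpoint using the 🤗 Transformers pipeline API; the repository id (Jacques7103/food-recognition) and the example image file name are assumptions not stated in this commit:

```python
# Sketch only: the repo id and image path below are assumptions.
from transformers import pipeline

# Load the fine-tuned ViT checkpoint for image classification.
classifier = pipeline("image-classification", model="Jacques7103/food-recognition")

# Classify a local food photo (hypothetical file name).
predictions = classifier("example_dish.jpg")
print(predictions)  # list of {"label": ..., "score": ...} dicts, highest score first
```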
 
all_results.json CHANGED
@@ -1,12 +1,12 @@
  {
  "epoch": 4.0,
- "eval_accuracy": 1.0,
- "eval_loss": 3.4838707447052,
- "eval_runtime": 1.3666,
- "eval_samples_per_second": 3.659,
- "eval_steps_per_second": 0.732,
- "train_loss": 3.7712278366088867,
- "train_runtime": 16.7176,
- "train_samples_per_second": 1.196,
- "train_steps_per_second": 0.239
+ "eval_accuracy": 0.44363636363636366,
+ "eval_loss": 4.043280601501465,
+ "eval_runtime": 282.0238,
+ "eval_samples_per_second": 3.9,
+ "eval_steps_per_second": 0.489,
+ "train_loss": 0.386546674683906,
+ "train_runtime": 3742.8868,
+ "train_samples_per_second": 1.176,
+ "train_steps_per_second": 0.074
  }
eval_results.json CHANGED
@@ -1,8 +1,8 @@
  {
  "epoch": 4.0,
- "eval_accuracy": 1.0,
- "eval_loss": 3.4838707447052,
- "eval_runtime": 1.3666,
- "eval_samples_per_second": 3.659,
- "eval_steps_per_second": 0.732
+ "eval_accuracy": 0.44363636363636366,
+ "eval_loss": 4.043280601501465,
+ "eval_runtime": 282.0238,
+ "eval_samples_per_second": 3.9,
+ "eval_steps_per_second": 0.489
  }
runs/Dec04_13-31-59_LAPTOP-PH299HA6/events.out.tfevents.1701675975.LAPTOP-PH299HA6.21992.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a210a038a59acf9e315c154c1188d0b04d6164d66e133c21456a2b9bd16774b8
+ size 363
train_results.json CHANGED
@@ -1,7 +1,7 @@
  {
  "epoch": 4.0,
- "train_loss": 3.7712278366088867,
- "train_runtime": 16.7176,
- "train_samples_per_second": 1.196,
- "train_steps_per_second": 0.239
+ "train_loss": 0.386546674683906,
+ "train_runtime": 3742.8868,
+ "train_samples_per_second": 1.176,
+ "train_steps_per_second": 0.074
  }
trainer_state.json CHANGED
@@ -1,28 +1,208 @@
  {
- "best_metric": null,
- "best_model_checkpoint": null,
+ "best_metric": 4.043280601501465,
+ "best_model_checkpoint": "food-recognition\\checkpoint-100",
  "epoch": 4.0,
  "eval_steps": 100,
- "global_step": 4,
+ "global_step": 276,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
+ {
+ "epoch": 0.14,
+ "learning_rate": 0.0001927536231884058,
+ "loss": 3.2682,
+ "step": 10
+ },
+ {
+ "epoch": 0.29,
+ "learning_rate": 0.0001855072463768116,
+ "loss": 1.8646,
+ "step": 20
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 0.0001782608695652174,
+ "loss": 1.287,
+ "step": 30
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0001710144927536232,
+ "loss": 0.9426,
+ "step": 40
+ },
+ {
+ "epoch": 0.72,
+ "learning_rate": 0.000163768115942029,
+ "loss": 0.7407,
+ "step": 50
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 0.0001565217391304348,
+ "loss": 0.5816,
+ "step": 60
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 0.00014927536231884058,
+ "loss": 0.4059,
+ "step": 70
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.00014202898550724638,
+ "loss": 0.27,
+ "step": 80
+ },
+ {
+ "epoch": 1.3,
+ "learning_rate": 0.0001347826086956522,
+ "loss": 0.1798,
+ "step": 90
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 0.00012753623188405797,
+ "loss": 0.1598,
+ "step": 100
+ },
+ {
+ "epoch": 1.45,
+ "eval_accuracy": 0.44363636363636366,
+ "eval_loss": 4.043280601501465,
+ "eval_runtime": 278.9851,
+ "eval_samples_per_second": 3.943,
+ "eval_steps_per_second": 0.495,
+ "step": 100
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 0.00012028985507246378,
+ "loss": 0.1136,
+ "step": 110
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.00011304347826086956,
+ "loss": 0.1096,
+ "step": 120
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 0.00010579710144927538,
+ "loss": 0.0863,
+ "step": 130
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 9.855072463768117e-05,
+ "loss": 0.0664,
+ "step": 140
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 9.130434782608696e-05,
+ "loss": 0.0865,
+ "step": 150
+ },
+ {
+ "epoch": 2.32,
+ "learning_rate": 8.405797101449276e-05,
+ "loss": 0.0441,
+ "step": 160
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 7.681159420289855e-05,
+ "loss": 0.0509,
+ "step": 170
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 6.956521739130436e-05,
+ "loss": 0.0412,
+ "step": 180
+ },
+ {
+ "epoch": 2.75,
+ "learning_rate": 6.231884057971015e-05,
+ "loss": 0.0701,
+ "step": 190
+ },
+ {
+ "epoch": 2.9,
+ "learning_rate": 5.507246376811594e-05,
+ "loss": 0.038,
+ "step": 200
+ },
+ {
+ "epoch": 2.9,
+ "eval_accuracy": 0.45181818181818184,
+ "eval_loss": 4.6229705810546875,
+ "eval_runtime": 289.4417,
+ "eval_samples_per_second": 3.8,
+ "eval_steps_per_second": 0.477,
+ "step": 200
+ },
+ {
+ "epoch": 3.04,
+ "learning_rate": 4.782608695652174e-05,
+ "loss": 0.0318,
+ "step": 210
+ },
+ {
+ "epoch": 3.19,
+ "learning_rate": 4.057971014492754e-05,
+ "loss": 0.0318,
+ "step": 220
+ },
+ {
+ "epoch": 3.33,
+ "learning_rate": 3.3333333333333335e-05,
+ "loss": 0.0471,
+ "step": 230
+ },
+ {
+ "epoch": 3.48,
+ "learning_rate": 2.608695652173913e-05,
+ "loss": 0.028,
+ "step": 240
+ },
+ {
+ "epoch": 3.62,
+ "learning_rate": 1.8840579710144928e-05,
+ "loss": 0.0272,
+ "step": 250
+ },
+ {
+ "epoch": 3.77,
+ "learning_rate": 1.1594202898550725e-05,
+ "loss": 0.0266,
+ "step": 260
+ },
+ {
+ "epoch": 3.91,
+ "learning_rate": 4.347826086956522e-06,
+ "loss": 0.0532,
+ "step": 270
+ },
  {
  "epoch": 4.0,
- "step": 4,
- "total_flos": 1551214973952000.0,
- "train_loss": 3.7712278366088867,
- "train_runtime": 16.7176,
- "train_samples_per_second": 1.196,
- "train_steps_per_second": 0.239
+ "step": 276,
+ "total_flos": 3.4126729426944e+17,
+ "train_loss": 0.386546674683906,
+ "train_runtime": 3742.8868,
+ "train_samples_per_second": 1.176,
+ "train_steps_per_second": 0.074
  }
  ],
  "logging_steps": 10,
- "max_steps": 4,
+ "max_steps": 276,
  "num_train_epochs": 4,
  "save_steps": 100,
- "total_flos": 1551214973952000.0,
+ "total_flos": 3.4126729426944e+17,
  "trial_name": null,
  "trial_params": null
  }
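For reference, the new log_history can be summarized with a short script; this is only a sketch, assuming trainer_state.json is read from the checkpoint directory in the current working directory:

```python
# Sketch: summarize trainer_state.json (file path is an assumption).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print("best metric:", state["best_metric"])
print("final logged training loss:", train_log[-1]["loss"])
for e in eval_log:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, eval_accuracy={e['eval_accuracy']:.4f}")
```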