eduagarcia committed
Commit 00a7095 · verified · 1 Parent(s): 84e70d3

Uploading raw results for princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2

Files changed (14)
  1. .gitattributes +3 -0
  2. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json +1207 -1207
  3. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_rte.jsonl +3 -0
  4. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_sts.jsonl +3 -0
  5. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_bluex.jsonl +0 -0
  6. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_enem_challenge.jsonl +0 -0
  7. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_faquad_nli.jsonl +0 -0
  8. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_hatebr_offensive.jsonl +0 -0
  9. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_oab_exams.jsonl +3 -0
  10. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_portuguese_hate_speech.jsonl +0 -0
  11. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_tweetsentbr.jsonl +0 -0
  12. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/results.json +1120 -0
  13. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json +22 -22
  14. princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2025-05-07T15-51-57.421344.json +208 -0
.gitattributes CHANGED
@@ -11047,3 +11047,6 @@ Magpie-Align/MagpieLM-8B-Chat-v0.1/raw_2025-05-07T15-22-16.111876/pretrained__Ma
11047
  Magpie-Align/MagpieLM-8B-Chat-v0.1/raw_2025-05-07T15-22-16.111876/pretrained__Magpie-Align__MagpieLM-8B-Chat-v0.1,dtype__bfloat16,revision__main,trust_remote_code__True,max_length__4098_enem_challenge.jsonl filter=lfs diff=lfs merge=lfs -text
11048
  Magpie-Align/MagpieLM-8B-Chat-v0.1/raw_2025-05-07T15-22-16.111876/pretrained__Magpie-Align__MagpieLM-8B-Chat-v0.1,dtype__bfloat16,revision__main,trust_remote_code__True,max_length__4098_oab_exams.jsonl filter=lfs diff=lfs merge=lfs -text
11049
 Magpie-Align/MagpieLM-8B-Chat-v0.1/raw_2025-05-07T15-22-16.111876/pretrained__Magpie-Align__MagpieLM-8B-Chat-v0.1,dtype__bfloat16,revision__main,trust_remote_code__True,max_length__4098_tweetsentbr.jsonl filter=lfs diff=lfs merge=lfs -text
11050
+ princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_rte.jsonl filter=lfs diff=lfs merge=lfs -text
11051
+ princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_sts.jsonl filter=lfs diff=lfs merge=lfs -text
11052
+ princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_oab_exams.jsonl filter=lfs diff=lfs merge=lfs -text
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2024-08-11T19-34-03.542478/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.896457973466223,
5
- "acc,all": 0.8905228758169934,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.6991081985044032,
10
- "mse,all": 1.1034438985734072,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5591098748261474,
15
- "acc,exam_id__UNICAMP_2018": 0.5,
16
- "acc,exam_id__USP_2022": 0.5918367346938775,
17
- "acc,exam_id__UNICAMP_2019": 0.56,
18
- "acc,exam_id__UNICAMP_2020": 0.6181818181818182,
19
- "acc,exam_id__USP_2024": 0.6341463414634146,
20
- "acc,exam_id__UNICAMP_2024": 0.5777777777777777,
21
- "acc,exam_id__USP_2021": 0.5576923076923077,
22
- "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
23
- "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
24
- "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
25
- "acc,exam_id__USP_2023": 0.6363636363636364,
26
- "acc,exam_id__UNICAMP_2023": 0.6744186046511628,
27
- "acc,exam_id__USP_2018": 0.4444444444444444,
28
- "acc,exam_id__USP_2020": 0.5178571428571429,
29
- "acc,exam_id__USP_2019": 0.55,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6578026592022393,
35
- "acc,exam_id__2011": 0.7094017094017094,
36
- "acc,exam_id__2010": 0.6752136752136753,
37
- "acc,exam_id__2009": 0.6782608695652174,
38
- "acc,exam_id__2015": 0.6890756302521008,
39
- "acc,exam_id__2017": 0.6896551724137931,
40
- "acc,exam_id__2014": 0.6330275229357798,
41
- "acc,exam_id__2022": 0.631578947368421,
42
- "acc,exam_id__2012": 0.6637931034482759,
43
- "acc,exam_id__2023": 0.6592592592592592,
44
- "acc,exam_id__2016_2": 0.6178861788617886,
45
- "acc,exam_id__2016": 0.5867768595041323,
46
- "acc,exam_id__2013": 0.6666666666666666
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.664955263315919,
50
- "acc,all": 0.6892307692307692,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8632511993787806,
56
- "acc,all": 0.86
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.4733485193621868,
60
- "acc,exam_id__2017-24": 0.4625,
61
- "acc,exam_id__2013-12": 0.525,
62
- "acc,exam_id__2015-18": 0.4875,
63
- "acc,exam_id__2018-25": 0.5,
64
- "acc,exam_id__2016-19": 0.5769230769230769,
65
- "acc,exam_id__2010-01": 0.3764705882352941,
66
- "acc,exam_id__2016-20": 0.5125,
67
- "acc,exam_id__2016-21": 0.4,
68
- "acc,exam_id__2011-04": 0.45,
69
- "acc,exam_id__2014-15": 0.6282051282051282,
70
- "acc,exam_id__2010-02": 0.46,
71
- "acc,exam_id__2013-10": 0.4,
72
- "acc,exam_id__2012-07": 0.5,
73
- "acc,exam_id__2015-17": 0.5769230769230769,
74
- "acc,exam_id__2013-11": 0.5,
75
- "acc,exam_id__2012-09": 0.4025974025974026,
76
- "acc,exam_id__2015-16": 0.45,
77
- "acc,exam_id__2012-08": 0.4625,
78
- "acc,exam_id__2011-03": 0.3939393939393939,
79
- "acc,exam_id__2016-20a": 0.3875,
80
- "acc,exam_id__2014-13": 0.4125,
81
- "acc,exam_id__2017-23": 0.45,
82
- "acc,exam_id__2012-06": 0.475,
83
- "acc,exam_id__2011-05": 0.4375,
84
- "acc,exam_id__2014-14": 0.55,
85
- "acc,exam_id__2012-06a": 0.5125,
86
- "acc,exam_id__2017-22": 0.525,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6911243989372992,
92
- "acc,all": 0.7050528789659224
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.6516261323497358,
96
- "acc,all": 0.7089552238805971,
97
- "alias": "tweetsentbr"
98
  }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
- },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
- }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
  },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
- },
209
- {
210
- "function": "take_first"
211
- }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
  }
219
  },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd7dd0f54e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
- },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
  },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
- },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
  }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fd7dd0f4ea0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
  },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
  },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
- },
395
- {
396
- "function": "remove_accents"
397
- },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
- },
415
- {
416
- "function": "take_first"
417
- }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd7dd0f5120>",
426
- "metadata": {
427
- "version": 1.1
428
  }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fd7dd0f56c0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
  },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
  },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
- },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
- },
504
- {
505
- "function": "take_first"
506
- }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd7dd0f5940>",
515
- "metadata": {
516
- "version": 1.1
517
  }
518
  },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
- },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
  },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
- },
625
- {
626
- "function": "take_first"
627
- }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
  }
635
  },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
- },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
  },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
- },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
  }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fd7dd0f4860>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
  },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
  },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
- },
821
- {
822
- "function": "remove_accents"
823
- },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
- },
840
- {
841
- "function": "take_first"
842
- }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fd7dd0f4ae0>",
851
- "metadata": {
852
- "version": 1.5
853
  }
854
  },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
- },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
  },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
- },
964
- {
965
- "function": "take_first"
966
- }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
  }
974
  },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
- },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
  },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
- },
1028
- {
1029
- "function": "take_first"
1030
- }
1031
- ]
1032
- }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
  }
1038
  }
1039
  },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
 
 
 
 
1061
  },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "2b18bdce3e3e8f588f48ff62350db9620ceefde3",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 16060530944,
1075
- "model_num_parameters": 8030261248,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 8,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
  },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1318.5322712418301,
1094
- "min_seq_length": 1299,
1095
- "max_seq_length": 1382,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1509.5322712418301,
1109
- "min_seq_length": 1490,
1110
- "max_seq_length": 1573,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
- },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1484.7719054242002,
1124
- "min_seq_length": 1165,
1125
- "max_seq_length": 2134,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
- },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1412.3547935619315,
1139
- "min_seq_length": 1187,
1140
- "max_seq_length": 2340,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
- },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1447.8215384615385,
1154
- "min_seq_length": 1402,
1155
- "max_seq_length": 1544,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
- },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1279.3878571428572,
1169
- "min_seq_length": 1259,
1170
- "max_seq_length": 1498,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1220.3772209567198,
1184
- "min_seq_length": 988,
1185
- "max_seq_length": 1654,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
- },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1676.4195064629848,
1199
- "min_seq_length": 1646,
1200
- "max_seq_length": 1708,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
- },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1537.1537313432837,
1214
- "min_seq_length": 1520,
1215
- "max_seq_length": 1585,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
  },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
  },
1243
- "git_hash": "5a13f3e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.597638648977482,
5
+ "acc,all": 0.8905228758169934,
6
+ "alias": "assin2_rte"
7
+ },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.6991081985044032,
10
+ "mse,all": 1.1034438985734072,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5591098748261474,
15
+ "acc,exam_id__UNICAMP_2018": 0.5,
16
+ "acc,exam_id__USP_2022": 0.5918367346938775,
17
+ "acc,exam_id__UNICAMP_2019": 0.56,
18
+ "acc,exam_id__UNICAMP_2020": 0.6181818181818182,
19
+ "acc,exam_id__USP_2024": 0.6341463414634146,
20
+ "acc,exam_id__UNICAMP_2024": 0.5777777777777777,
21
+ "acc,exam_id__USP_2021": 0.5576923076923077,
22
+ "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
23
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
24
+ "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
25
+ "acc,exam_id__USP_2023": 0.6363636363636364,
26
+ "acc,exam_id__UNICAMP_2023": 0.6744186046511628,
27
+ "acc,exam_id__USP_2018": 0.4444444444444444,
28
+ "acc,exam_id__USP_2020": 0.5178571428571429,
29
+ "acc,exam_id__USP_2019": 0.55,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6578026592022393,
35
+ "acc,exam_id__2011": 0.7094017094017094,
36
+ "acc,exam_id__2010": 0.6752136752136753,
37
+ "acc,exam_id__2009": 0.6782608695652174,
38
+ "acc,exam_id__2015": 0.6890756302521008,
39
+ "acc,exam_id__2017": 0.6896551724137931,
40
+ "acc,exam_id__2014": 0.6330275229357798,
41
+ "acc,exam_id__2022": 0.631578947368421,
42
+ "acc,exam_id__2012": 0.6637931034482759,
43
+ "acc,exam_id__2023": 0.6592592592592592,
44
+ "acc,exam_id__2016_2": 0.6178861788617886,
45
+ "acc,exam_id__2016": 0.5867768595041323,
46
+ "acc,exam_id__2013": 0.6666666666666666
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.4433035088772794,
50
+ "acc,all": 0.6892307692307692,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.5755007995858538,
56
+ "acc,all": 0.86
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.4733485193621868,
60
+ "acc,exam_id__2017-24": 0.4625,
61
+ "acc,exam_id__2013-12": 0.525,
62
+ "acc,exam_id__2015-18": 0.4875,
63
+ "acc,exam_id__2018-25": 0.5,
64
+ "acc,exam_id__2016-19": 0.5769230769230769,
65
+ "acc,exam_id__2010-01": 0.3764705882352941,
66
+ "acc,exam_id__2016-20": 0.5125,
67
+ "acc,exam_id__2016-21": 0.4,
68
+ "acc,exam_id__2011-04": 0.45,
69
+ "acc,exam_id__2014-15": 0.6282051282051282,
70
+ "acc,exam_id__2010-02": 0.46,
71
+ "acc,exam_id__2013-10": 0.4,
72
+ "acc,exam_id__2012-07": 0.5,
73
+ "acc,exam_id__2015-17": 0.5769230769230769,
74
+ "acc,exam_id__2013-11": 0.5,
75
+ "acc,exam_id__2012-09": 0.4025974025974026,
76
+ "acc,exam_id__2015-16": 0.45,
77
+ "acc,exam_id__2012-08": 0.4625,
78
+ "acc,exam_id__2011-03": 0.3939393939393939,
79
+ "acc,exam_id__2016-20a": 0.3875,
80
+ "acc,exam_id__2014-13": 0.4125,
81
+ "acc,exam_id__2017-23": 0.45,
82
+ "acc,exam_id__2012-06": 0.475,
83
+ "acc,exam_id__2011-05": 0.4375,
84
+ "acc,exam_id__2014-14": 0.55,
85
+ "acc,exam_id__2012-06a": 0.5125,
86
+ "acc,exam_id__2017-22": 0.525,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.46074959929153286,
92
+ "acc,all": 0.7050528789659224
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.4887195992623018,
96
+ "acc,all": 0.7089552238805971,
97
+ "alias": "tweetsentbr"
98
+ }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
  },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
  }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
+ },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
  },
209
+ {
210
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
+ }
219
+ },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd7dd0f54e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
+ },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
  },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
+ },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
+ {
330
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
  }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd7dd0f4ea0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
+ },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
+ },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
  },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
  },
415
+ {
416
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd7dd0f5120>",
426
+ "metadata": {
427
+ "version": 1.1
428
+ }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd7dd0f56c0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
+ },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
+ },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
  },
484
+ {
485
+ "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
486
  },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd7dd0f5940>",
515
+ "metadata": {
516
+ "version": 1.1
517
+ }
518
+ },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
+ },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
  },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
  },
625
+ {
626
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
+ }
635
+ },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
+ },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
  },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
+ },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
  },
745
+ {
746
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
747
  }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fd7dd0f4860>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
+ },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
+ },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
  },
821
+ {
822
+ "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fd7dd0f4ae0>",
851
+ "metadata": {
852
+ "version": 1.5
853
+ }
854
+ },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
  },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
+ },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
  },
964
+ {
965
+ "function": "take_first"
966
  }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
+ }
974
+ },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
+ },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
  },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
  },
1028
+ {
1029
+ "function": "take_first"
1030
  }
1031
+ ]
1032
  }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
+ }
1038
+ }
1039
+ },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
+ },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
+ },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "2b18bdce3e3e8f588f48ff62350db9620ceefde3",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 16060530944,
1075
+ "model_num_parameters": 8030261248,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 8,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
+ },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1318.5322712418301,
1094
+ "min_seq_length": 1299,
1095
+ "max_seq_length": 1382,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
  },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1509.5322712418301,
1109
+ "min_seq_length": 1490,
1110
+ "max_seq_length": 1573,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
  },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1484.7719054242002,
1124
+ "min_seq_length": 1165,
1125
+ "max_seq_length": 2134,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
  },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1412.3547935619315,
1139
+ "min_seq_length": 1187,
1140
+ "max_seq_length": 2340,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
 
1145
  },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1447.8215384615385,
1154
+ "min_seq_length": 1402,
1155
+ "max_seq_length": 1544,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1279.3878571428572,
1169
+ "min_seq_length": 1259,
1170
+ "max_seq_length": 1498,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1220.3772209567198,
1184
+ "min_seq_length": 988,
1185
+ "max_seq_length": 1654,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
  },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1676.4195064629848,
1199
+ "min_seq_length": 1646,
1200
+ "max_seq_length": 1708,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
  },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1537.1537313432837,
1214
+ "min_seq_length": 1520,
1215
+ "max_seq_length": 1585,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
+ },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
+ },
1243
+ "git_hash": "5a13f3e"
1244
  }
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_rte.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a14cf05c08fe08fd0a6e674ee99a9ac198b3c2d9e89a87aa4bd5ea50d3ade7a3
3
+ size 12029671
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_assin2_sts.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0896e82748bdeb24b236dd5bf0098fb0609bc228c657d0c9ac210d97bbeb20f1
3
+ size 12448399
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_bluex.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_enem_challenge.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_faquad_nli.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_hatebr_offensive.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_oab_exams.jsonl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80a0366f816bca0605a0d7e79c2b88031b8b1c1cabc570d7e77167360f5ee121
3
+ size 14563279
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_portuguese_hate_speech.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/pretrained__princeton-nlp__Llama-3-Instruct-8B-SimPO-v0.2,dtype__bfloat16,revision__9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code__True,max_length__4098_tweetsentbr.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/raw_2025-05-07T15-51-57.421344/results.json ADDED
@@ -0,0 +1,1120 @@
1
+ {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.8739668044845157,
5
+ "acc,all": 0.8574346405228758,
6
+ "alias": "assin2_rte"
7
+ },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.6849205578626394,
10
+ "mse,all": 1.0289630421918505,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5424200278164116,
15
+ "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921,
16
+ "acc,exam_id__UNICAMP_2018": 0.46296296296296297,
17
+ "acc,exam_id__USP_2024": 0.6341463414634146,
18
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
19
+ "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
20
+ "acc,exam_id__USP_2020": 0.6071428571428571,
21
+ "acc,exam_id__UNICAMP_2022": 0.48717948717948717,
22
+ "acc,exam_id__UNICAMP_2024": 0.5555555555555556,
23
+ "acc,exam_id__USP_2021": 0.5769230769230769,
24
+ "acc,exam_id__USP_2018": 0.46296296296296297,
25
+ "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
26
+ "acc,exam_id__USP_2022": 0.5306122448979592,
27
+ "acc,exam_id__USP_2023": 0.6136363636363636,
28
+ "acc,exam_id__USP_2019": 0.5,
29
+ "acc,exam_id__UNICAMP_2019": 0.56,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6543037088873338,
35
+ "acc,exam_id__2010": 0.7008547008547008,
36
+ "acc,exam_id__2015": 0.7226890756302521,
37
+ "acc,exam_id__2012": 0.6379310344827587,
38
+ "acc,exam_id__2009": 0.6956521739130435,
39
+ "acc,exam_id__2016_2": 0.5772357723577236,
40
+ "acc,exam_id__2016": 0.6033057851239669,
41
+ "acc,exam_id__2017": 0.6551724137931034,
42
+ "acc,exam_id__2014": 0.6330275229357798,
43
+ "acc,exam_id__2022": 0.6165413533834586,
44
+ "acc,exam_id__2023": 0.6814814814814815,
45
+ "acc,exam_id__2011": 0.7008547008547008,
46
+ "acc,exam_id__2013": 0.6296296296296297
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.6179372835523507,
50
+ "acc,all": 0.5938461538461538,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8419469928644241,
56
+ "acc,all": 0.8264285714285714
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.46378132118451026,
60
+ "acc,exam_id__2012-08": 0.4625,
61
+ "acc,exam_id__2015-16": 0.4625,
62
+ "acc,exam_id__2014-13": 0.475,
63
+ "acc,exam_id__2011-03": 0.36363636363636365,
64
+ "acc,exam_id__2016-19": 0.5641025641025641,
65
+ "acc,exam_id__2012-07": 0.4875,
66
+ "acc,exam_id__2011-05": 0.45,
67
+ "acc,exam_id__2013-12": 0.5,
68
+ "acc,exam_id__2017-23": 0.4375,
69
+ "acc,exam_id__2016-21": 0.4,
70
+ "acc,exam_id__2016-20": 0.5125,
71
+ "acc,exam_id__2013-10": 0.3875,
72
+ "acc,exam_id__2010-01": 0.3764705882352941,
73
+ "acc,exam_id__2011-04": 0.4125,
74
+ "acc,exam_id__2015-17": 0.5897435897435898,
75
+ "acc,exam_id__2017-22": 0.525,
76
+ "acc,exam_id__2016-20a": 0.3875,
77
+ "acc,exam_id__2017-24": 0.475,
78
+ "acc,exam_id__2015-18": 0.4375,
79
+ "acc,exam_id__2014-14": 0.5375,
80
+ "acc,exam_id__2013-11": 0.45,
81
+ "acc,exam_id__2012-06a": 0.4625,
82
+ "acc,exam_id__2010-02": 0.45,
83
+ "acc,exam_id__2012-06": 0.4125,
84
+ "acc,exam_id__2014-15": 0.6282051282051282,
85
+ "acc,exam_id__2018-25": 0.475,
86
+ "acc,exam_id__2012-09": 0.44155844155844154,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7047160293321522,
92
+ "acc,all": 0.7038777908343126
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6579725203362695,
96
+ "acc,all": 0.7079601990049751,
97
+ "alias": "tweetsentbr"
98
+ }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "do_sample": false,
189
+ "temperature": 0.0,
190
+ "top_k": null,
191
+ "top_p": null,
192
+ "until": [
193
+ "\n\n"
194
+ ]
195
+ },
196
+ "repeats": 1,
197
+ "filter_list": [
198
+ {
199
+ "name": "all",
200
+ "filter": [
201
+ {
202
+ "function": "find_similar_label",
203
+ "labels": [
204
+ "Sim",
205
+ "Não"
206
+ ]
207
+ },
208
+ {
209
+ "function": "take_first"
210
+ }
211
+ ]
212
+ }
213
+ ],
214
+ "should_decontaminate": false,
215
+ "metadata": {
216
+ "version": 1.1
217
+ }
218
+ },
219
+ "assin2_sts": {
220
+ "task": "assin2_sts",
221
+ "group": [
222
+ "pt_benchmark",
223
+ "assin2"
224
+ ],
225
+ "dataset_path": "assin2",
226
+ "test_split": "test",
227
+ "fewshot_split": "train",
228
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
229
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f753cfac040>",
230
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
231
+ "target_delimiter": " ",
232
+ "fewshot_delimiter": "\n\n",
233
+ "fewshot_config": {
234
+ "sampler": "id_sampler",
235
+ "sampler_config": {
236
+ "id_list": [
237
+ 1,
238
+ 3251,
239
+ 2,
240
+ 3252,
241
+ 3,
242
+ 4,
243
+ 5,
244
+ 6,
245
+ 3253,
246
+ 7,
247
+ 3254,
248
+ 3255,
249
+ 3256,
250
+ 8,
251
+ 9,
252
+ 10,
253
+ 3257,
254
+ 11,
255
+ 3258,
256
+ 12,
257
+ 13,
258
+ 14,
259
+ 15,
260
+ 3259,
261
+ 3260,
262
+ 3261,
263
+ 3262,
264
+ 3263,
265
+ 16,
266
+ 17,
267
+ 3264,
268
+ 18,
269
+ 3265,
270
+ 3266,
271
+ 3267,
272
+ 19,
273
+ 20,
274
+ 3268,
275
+ 3269,
276
+ 21,
277
+ 3270,
278
+ 3271,
279
+ 22,
280
+ 3272,
281
+ 3273,
282
+ 23,
283
+ 3274,
284
+ 24,
285
+ 25,
286
+ 3275
287
+ ],
288
+ "id_column": "sentence_pair_id"
289
+ }
290
+ },
291
+ "num_fewshot": 15,
292
+ "metric_list": [
293
+ {
294
+ "metric": "pearson",
295
+ "aggregation": "pearsonr",
296
+ "higher_is_better": true
297
+ },
298
+ {
299
+ "metric": "mse",
300
+ "aggregation": "mean_squared_error",
301
+ "higher_is_better": false
302
+ }
303
+ ],
304
+ "output_type": "generate_until",
305
+ "generation_kwargs": {
306
+ "do_sample": false,
307
+ "temperature": 0.0,
308
+ "top_k": null,
309
+ "top_p": null,
310
+ "until": [
311
+ "\n\n"
312
+ ]
313
+ },
314
+ "repeats": 1,
315
+ "filter_list": [
316
+ {
317
+ "name": "all",
318
+ "filter": [
319
+ {
320
+ "function": "number_filter",
321
+ "type": "float",
322
+ "range_min": 1.0,
323
+ "range_max": 5.0,
324
+ "on_outside_range": "clip",
325
+ "fallback": 5.0
326
+ },
327
+ {
328
+ "function": "take_first"
329
+ }
330
+ ]
331
+ }
332
+ ],
333
+ "should_decontaminate": false,
334
+ "metadata": {
335
+ "version": 1.1
336
+ }
337
+ },
338
+ "bluex": {
339
+ "task": "bluex",
340
+ "group": [
341
+ "pt_benchmark",
342
+ "vestibular"
343
+ ],
344
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
345
+ "test_split": "train",
346
+ "fewshot_split": "train",
347
+ "doc_to_text": "<function enem_doc_to_text at 0x7f753d173920>",
348
+ "doc_to_target": "{{answerKey}}",
349
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
350
+ "target_delimiter": " ",
351
+ "fewshot_delimiter": "\n\n",
352
+ "fewshot_config": {
353
+ "sampler": "id_sampler",
354
+ "sampler_config": {
355
+ "id_list": [
356
+ "USP_2018_3",
357
+ "UNICAMP_2018_2",
358
+ "USP_2018_35",
359
+ "UNICAMP_2018_16",
360
+ "USP_2018_89"
361
+ ],
362
+ "id_column": "id",
363
+ "exclude_from_task": true
364
+ }
365
+ },
366
+ "num_fewshot": 3,
367
+ "metric_list": [
368
+ {
369
+ "metric": "acc",
370
+ "aggregation": "acc",
371
+ "higher_is_better": true
372
+ }
373
+ ],
374
+ "output_type": "generate_until",
375
+ "generation_kwargs": {
376
+ "do_sample": false,
377
+ "temperature": 0.0,
378
+ "top_k": null,
379
+ "top_p": null,
380
+ "until": [
381
+ "\n\n"
382
+ ]
383
+ },
384
+ "repeats": 1,
385
+ "filter_list": [
386
+ {
387
+ "name": "all",
388
+ "filter": [
389
+ {
390
+ "function": "normalize_spaces"
391
+ },
392
+ {
393
+ "function": "remove_accents"
394
+ },
395
+ {
396
+ "function": "find_choices",
397
+ "choices": [
398
+ "A",
399
+ "B",
400
+ "C",
401
+ "D",
402
+ "E"
403
+ ],
404
+ "regex_patterns": [
405
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
406
+ "\\b([ABCDE])\\.",
407
+ "\\b([ABCDE]) ?[.):-]",
408
+ "\\b([ABCDE])$",
409
+ "\\b([ABCDE])\\b"
410
+ ]
411
+ },
412
+ {
413
+ "function": "take_first"
414
+ }
415
+ ],
416
+ "group_by": {
417
+ "column": "exam_id"
418
+ }
419
+ }
420
+ ],
421
+ "should_decontaminate": true,
422
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f753d173c40>",
423
+ "metadata": {
424
+ "version": 1.1
425
+ }
426
+ },
427
+ "enem_challenge": {
428
+ "task": "enem_challenge",
429
+ "task_alias": "enem",
430
+ "group": [
431
+ "pt_benchmark",
432
+ "vestibular"
433
+ ],
434
+ "dataset_path": "eduagarcia/enem_challenge",
435
+ "test_split": "train",
436
+ "fewshot_split": "train",
437
+ "doc_to_text": "<function enem_doc_to_text at 0x7f753cfac4a0>",
438
+ "doc_to_target": "{{answerKey}}",
439
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
440
+ "target_delimiter": " ",
441
+ "fewshot_delimiter": "\n\n",
442
+ "fewshot_config": {
443
+ "sampler": "id_sampler",
444
+ "sampler_config": {
445
+ "id_list": [
446
+ "2022_21",
447
+ "2022_88",
448
+ "2022_143"
449
+ ],
450
+ "id_column": "id",
451
+ "exclude_from_task": true
452
+ }
453
+ },
454
+ "num_fewshot": 3,
455
+ "metric_list": [
456
+ {
457
+ "metric": "acc",
458
+ "aggregation": "acc",
459
+ "higher_is_better": true
460
+ }
461
+ ],
462
+ "output_type": "generate_until",
463
+ "generation_kwargs": {
464
+ "do_sample": false,
465
+ "temperature": 0.0,
466
+ "top_k": null,
467
+ "top_p": null,
468
+ "until": [
469
+ "\n\n"
470
+ ]
471
+ },
472
+ "repeats": 1,
473
+ "filter_list": [
474
+ {
475
+ "name": "all",
476
+ "filter": [
477
+ {
478
+ "function": "normalize_spaces"
479
+ },
480
+ {
481
+ "function": "remove_accents"
482
+ },
483
+ {
484
+ "function": "find_choices",
485
+ "choices": [
486
+ "A",
487
+ "B",
488
+ "C",
489
+ "D",
490
+ "E"
491
+ ],
492
+ "regex_patterns": [
493
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
494
+ "\\b([ABCDE])\\.",
495
+ "\\b([ABCDE]) ?[.):-]",
496
+ "\\b([ABCDE])$",
497
+ "\\b([ABCDE])\\b"
498
+ ]
499
+ },
500
+ {
501
+ "function": "take_first"
502
+ }
503
+ ],
504
+ "group_by": {
505
+ "column": "exam_id"
506
+ }
507
+ }
508
+ ],
509
+ "should_decontaminate": true,
510
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f753cfac7c0>",
511
+ "metadata": {
512
+ "version": 1.1
513
+ }
514
+ },
515
+ "faquad_nli": {
516
+ "task": "faquad_nli",
517
+ "group": [
518
+ "pt_benchmark"
519
+ ],
520
+ "dataset_path": "ruanchaves/faquad-nli",
521
+ "test_split": "test",
522
+ "fewshot_split": "train",
523
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
524
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
525
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
526
+ "target_delimiter": " ",
527
+ "fewshot_delimiter": "\n\n",
528
+ "fewshot_config": {
529
+ "sampler": "first_n",
530
+ "sampler_config": {
531
+ "fewshot_indices": [
532
+ 1893,
533
+ 949,
534
+ 663,
535
+ 105,
536
+ 1169,
537
+ 2910,
538
+ 2227,
539
+ 2813,
540
+ 974,
541
+ 558,
542
+ 1503,
543
+ 1958,
544
+ 2918,
545
+ 601,
546
+ 1560,
547
+ 984,
548
+ 2388,
549
+ 995,
550
+ 2233,
551
+ 1982,
552
+ 165,
553
+ 2788,
554
+ 1312,
555
+ 2285,
556
+ 522,
557
+ 1113,
558
+ 1670,
559
+ 323,
560
+ 236,
561
+ 1263,
562
+ 1562,
563
+ 2519,
564
+ 1049,
565
+ 432,
566
+ 1167,
567
+ 1394,
568
+ 2022,
569
+ 2551,
570
+ 2194,
571
+ 2187,
572
+ 2282,
573
+ 2816,
574
+ 108,
575
+ 301,
576
+ 1185,
577
+ 1315,
578
+ 1420,
579
+ 2436,
580
+ 2322,
581
+ 766
582
+ ]
583
+ }
584
+ },
585
+ "num_fewshot": 15,
586
+ "metric_list": [
587
+ {
588
+ "metric": "f1_macro",
589
+ "aggregation": "f1_macro",
590
+ "higher_is_better": true
591
+ },
592
+ {
593
+ "metric": "acc",
594
+ "aggregation": "acc",
595
+ "higher_is_better": true
596
+ }
597
+ ],
598
+ "output_type": "generate_until",
599
+ "generation_kwargs": {
600
+ "do_sample": false,
601
+ "temperature": 0.0,
602
+ "top_k": null,
603
+ "top_p": null,
604
+ "until": [
605
+ "\n\n"
606
+ ]
607
+ },
608
+ "repeats": 1,
609
+ "filter_list": [
610
+ {
611
+ "name": "all",
612
+ "filter": [
613
+ {
614
+ "function": "find_similar_label",
615
+ "labels": [
616
+ "Sim",
617
+ "Não"
618
+ ]
619
+ },
620
+ {
621
+ "function": "take_first"
622
+ }
623
+ ]
624
+ }
625
+ ],
626
+ "should_decontaminate": false,
627
+ "metadata": {
628
+ "version": 1.1
629
+ }
630
+ },
631
+ "hatebr_offensive": {
632
+ "task": "hatebr_offensive",
633
+ "task_alias": "hatebr_offensive_binary",
634
+ "group": [
635
+ "pt_benchmark"
636
+ ],
637
+ "dataset_path": "eduagarcia/portuguese_benchmark",
638
+ "dataset_name": "HateBR_offensive_binary",
639
+ "test_split": "test",
640
+ "fewshot_split": "train",
641
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
642
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
643
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
644
+ "target_delimiter": " ",
645
+ "fewshot_delimiter": "\n\n",
646
+ "fewshot_config": {
647
+ "sampler": "id_sampler",
648
+ "sampler_config": {
649
+ "id_list": [
650
+ 48,
651
+ 44,
652
+ 36,
653
+ 20,
654
+ 3511,
655
+ 88,
656
+ 3555,
657
+ 16,
658
+ 56,
659
+ 3535,
660
+ 60,
661
+ 40,
662
+ 3527,
663
+ 4,
664
+ 76,
665
+ 3579,
666
+ 3523,
667
+ 3551,
668
+ 68,
669
+ 3503,
670
+ 84,
671
+ 3539,
672
+ 64,
673
+ 3599,
674
+ 80,
675
+ 3563,
676
+ 3559,
677
+ 3543,
678
+ 3547,
679
+ 3587,
680
+ 3595,
681
+ 3575,
682
+ 3567,
683
+ 3591,
684
+ 24,
685
+ 96,
686
+ 92,
687
+ 3507,
688
+ 52,
689
+ 72,
690
+ 8,
691
+ 3571,
692
+ 3515,
693
+ 3519,
694
+ 3531,
695
+ 28,
696
+ 32,
697
+ 0,
698
+ 12,
699
+ 3583
700
+ ],
701
+ "id_column": "idx"
702
+ }
703
+ },
704
+ "num_fewshot": 25,
705
+ "metric_list": [
706
+ {
707
+ "metric": "f1_macro",
708
+ "aggregation": "f1_macro",
709
+ "higher_is_better": true
710
+ },
711
+ {
712
+ "metric": "acc",
713
+ "aggregation": "acc",
714
+ "higher_is_better": true
715
+ }
716
+ ],
717
+ "output_type": "generate_until",
718
+ "generation_kwargs": {
719
+ "do_sample": false,
720
+ "temperature": 0.0,
721
+ "top_k": null,
722
+ "top_p": null,
723
+ "until": [
724
+ "\n\n"
725
+ ]
726
+ },
727
+ "repeats": 1,
728
+ "filter_list": [
729
+ {
730
+ "name": "all",
731
+ "filter": [
732
+ {
733
+ "function": "find_similar_label",
734
+ "labels": [
735
+ "Sim",
736
+ "Não"
737
+ ]
738
+ },
739
+ {
740
+ "function": "take_first"
741
+ }
742
+ ]
743
+ }
744
+ ],
745
+ "should_decontaminate": false,
746
+ "metadata": {
747
+ "version": 1.0
748
+ }
749
+ },
750
+ "oab_exams": {
751
+ "task": "oab_exams",
752
+ "group": [
753
+ "legal_benchmark",
754
+ "pt_benchmark"
755
+ ],
756
+ "dataset_path": "eduagarcia/oab_exams",
757
+ "test_split": "train",
758
+ "fewshot_split": "train",
759
+ "doc_to_text": "<function doc_to_text at 0x7f753d1732e0>",
760
+ "doc_to_target": "{{answerKey}}",
761
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
762
+ "target_delimiter": " ",
763
+ "fewshot_delimiter": "\n\n",
764
+ "fewshot_config": {
765
+ "sampler": "id_sampler",
766
+ "sampler_config": {
767
+ "id_list": [
768
+ "2010-01_1",
769
+ "2010-01_11",
770
+ "2010-01_13",
771
+ "2010-01_23",
772
+ "2010-01_26",
773
+ "2010-01_28",
774
+ "2010-01_38",
775
+ "2010-01_48",
776
+ "2010-01_58",
777
+ "2010-01_68",
778
+ "2010-01_76",
779
+ "2010-01_83",
780
+ "2010-01_85",
781
+ "2010-01_91",
782
+ "2010-01_99"
783
+ ],
784
+ "id_column": "id",
785
+ "exclude_from_task": true
786
+ }
787
+ },
788
+ "num_fewshot": 3,
789
+ "metric_list": [
790
+ {
791
+ "metric": "acc",
792
+ "aggregation": "acc",
793
+ "higher_is_better": true
794
+ }
795
+ ],
796
+ "output_type": "generate_until",
797
+ "generation_kwargs": {
798
+ "max_gen_toks": 32,
799
+ "do_sample": false,
800
+ "temperature": 0.0,
801
+ "top_k": null,
802
+ "top_p": null,
803
+ "until": [
804
+ "\n\n"
805
+ ]
806
+ },
807
+ "repeats": 1,
808
+ "filter_list": [
809
+ {
810
+ "name": "all",
811
+ "filter": [
812
+ {
813
+ "function": "normalize_spaces"
814
+ },
815
+ {
816
+ "function": "remove_accents"
817
+ },
818
+ {
819
+ "function": "find_choices",
820
+ "choices": [
821
+ "A",
822
+ "B",
823
+ "C",
824
+ "D"
825
+ ],
826
+ "regex_patterns": [
827
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
828
+ "\\b([ABCD])\\.",
829
+ "\\b([ABCD]) ?[.):-]",
830
+ "\\b([ABCD])$",
831
+ "\\b([ABCD])\\b"
832
+ ]
833
+ },
834
+ {
835
+ "function": "take_first"
836
+ }
837
+ ],
838
+ "group_by": {
839
+ "column": "exam_id"
840
+ }
841
+ }
842
+ ],
843
+ "should_decontaminate": true,
844
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f753d173420>",
845
+ "metadata": {
846
+ "version": 1.5
847
+ }
848
+ },
849
+ "portuguese_hate_speech": {
850
+ "task": "portuguese_hate_speech",
851
+ "task_alias": "portuguese_hate_speech_binary",
852
+ "group": [
853
+ "pt_benchmark"
854
+ ],
855
+ "dataset_path": "eduagarcia/portuguese_benchmark",
856
+ "dataset_name": "Portuguese_Hate_Speech_binary",
857
+ "test_split": "test",
858
+ "fewshot_split": "train",
859
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
860
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
861
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
862
+ "target_delimiter": " ",
863
+ "fewshot_delimiter": "\n\n",
864
+ "fewshot_config": {
865
+ "sampler": "id_sampler",
866
+ "sampler_config": {
867
+ "id_list": [
868
+ 52,
869
+ 50,
870
+ 39,
871
+ 28,
872
+ 3,
873
+ 105,
874
+ 22,
875
+ 25,
876
+ 60,
877
+ 11,
878
+ 66,
879
+ 41,
880
+ 9,
881
+ 4,
882
+ 91,
883
+ 42,
884
+ 7,
885
+ 20,
886
+ 76,
887
+ 1,
888
+ 104,
889
+ 13,
890
+ 67,
891
+ 54,
892
+ 97,
893
+ 27,
894
+ 24,
895
+ 14,
896
+ 16,
897
+ 48,
898
+ 53,
899
+ 40,
900
+ 34,
901
+ 49,
902
+ 32,
903
+ 119,
904
+ 114,
905
+ 2,
906
+ 58,
907
+ 83,
908
+ 18,
909
+ 36,
910
+ 5,
911
+ 6,
912
+ 10,
913
+ 35,
914
+ 38,
915
+ 0,
916
+ 21,
917
+ 46
918
+ ],
919
+ "id_column": "idx"
920
+ }
921
+ },
922
+ "num_fewshot": 25,
923
+ "metric_list": [
924
+ {
925
+ "metric": "f1_macro",
926
+ "aggregation": "f1_macro",
927
+ "higher_is_better": true
928
+ },
929
+ {
930
+ "metric": "acc",
931
+ "aggregation": "acc",
932
+ "higher_is_better": true
933
+ }
934
+ ],
935
+ "output_type": "generate_until",
936
+ "generation_kwargs": {
937
+ "do_sample": false,
938
+ "temperature": 0.0,
939
+ "top_k": null,
940
+ "top_p": null,
941
+ "until": [
942
+ "\n\n"
943
+ ]
944
+ },
945
+ "repeats": 1,
946
+ "filter_list": [
947
+ {
948
+ "name": "all",
949
+ "filter": [
950
+ {
951
+ "function": "find_similar_label",
952
+ "labels": [
953
+ "Sim",
954
+ "Não"
955
+ ]
956
+ },
957
+ {
958
+ "function": "take_first"
959
+ }
960
+ ]
961
+ }
962
+ ],
963
+ "should_decontaminate": false,
964
+ "metadata": {
965
+ "version": 1.0
966
+ }
967
+ },
968
+ "tweetsentbr": {
969
+ "task": "tweetsentbr",
970
+ "group": [
971
+ "pt_benchmark"
972
+ ],
973
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
974
+ "test_split": "test",
975
+ "fewshot_split": "train",
976
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
977
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
978
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
979
+ "target_delimiter": " ",
980
+ "fewshot_delimiter": "\n\n",
981
+ "fewshot_config": {
982
+ "sampler": "first_n"
983
+ },
984
+ "num_fewshot": 25,
985
+ "metric_list": [
986
+ {
987
+ "metric": "f1_macro",
988
+ "aggregation": "f1_macro",
989
+ "higher_is_better": true
990
+ },
991
+ {
992
+ "metric": "acc",
993
+ "aggregation": "acc",
994
+ "higher_is_better": true
995
+ }
996
+ ],
997
+ "output_type": "generate_until",
998
+ "generation_kwargs": {
999
+ "do_sample": false,
1000
+ "temperature": 0.0,
1001
+ "top_k": null,
1002
+ "top_p": null,
1003
+ "until": [
1004
+ "\n\n"
1005
+ ]
1006
+ },
1007
+ "repeats": 1,
1008
+ "filter_list": [
1009
+ {
1010
+ "name": "all",
1011
+ "filter": [
1012
+ {
1013
+ "function": "find_similar_label",
1014
+ "labels": [
1015
+ "Positivo",
1016
+ "Neutro",
1017
+ "Negativo"
1018
+ ]
1019
+ },
1020
+ {
1021
+ "function": "take_first"
1022
+ }
1023
+ ]
1024
+ }
1025
+ ],
1026
+ "should_decontaminate": false,
1027
+ "metadata": {
1028
+ "version": 1.0
1029
+ }
1030
+ }
1031
+ },
1032
+ "versions": {
1033
+ "assin2_rte": 1.1,
1034
+ "assin2_sts": 1.1,
1035
+ "bluex": 1.1,
1036
+ "enem_challenge": 1.1,
1037
+ "faquad_nli": 1.1,
1038
+ "hatebr_offensive": 1.0,
1039
+ "oab_exams": 1.5,
1040
+ "portuguese_hate_speech": 1.0,
1041
+ "tweetsentbr": 1.0
1042
+ },
1043
+ "n-shot": {
1044
+ "assin2_rte": 15,
1045
+ "assin2_sts": 15,
1046
+ "bluex": 3,
1047
+ "enem_challenge": 3,
1048
+ "faquad_nli": 15,
1049
+ "hatebr_offensive": 25,
1050
+ "oab_exams": 3,
1051
+ "portuguese_hate_speech": 25,
1052
+ "tweetsentbr": 25
1053
+ },
1054
+ "model_meta": {
1055
+ "n_gpus": 1,
1056
+ "model_dtype": "torch.bfloat16",
1057
+ "model_is_loaded_in_4bit": null,
1058
+ "model_is_loaded_in_8bit": null,
1059
+ "model_is_quantized": false,
1060
+ "model_quantization": null,
1061
+ "model_sha": "9ac0fbee445e7755e50520e9881d67588b4b854c",
1062
+ "batch_size": "auto",
1063
+ "max_length": 4098,
1064
+ "max_gen_toks": 2048,
1065
+ "until": null,
1066
+ "gen_kwargs": {},
1067
+ "effective_batch_size": 7075.0
1068
+ },
1069
+ "task_model_meta": {
1070
+ "assin2_rte": {
1071
+ "sample_size": 2448
1072
+ },
1073
+ "assin2_sts": {
1074
+ "sample_size": 2448
1075
+ },
1076
+ "bluex": {
1077
+ "sample_size": 719
1078
+ },
1079
+ "enem_challenge": {
1080
+ "sample_size": 1429
1081
+ },
1082
+ "faquad_nli": {
1083
+ "sample_size": 650
1084
+ },
1085
+ "hatebr_offensive": {
1086
+ "sample_size": 1400
1087
+ },
1088
+ "oab_exams": {
1089
+ "sample_size": 2195
1090
+ },
1091
+ "portuguese_hate_speech": {
1092
+ "sample_size": 851
1093
+ },
1094
+ "tweetsentbr": {
1095
+ "sample_size": 2010
1096
+ }
1097
+ },
1098
+ "config": {
1099
+ "model": "vllm",
1100
+ "model_args": "pretrained=princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2,dtype=bfloat16,revision=9ac0fbee445e7755e50520e9881d67588b4b854c,trust_remote_code=True,max_length=4098",
1101
+ "batch_size": "auto",
1102
+ "batch_sizes": [],
1103
+ "device": null,
1104
+ "use_cache": null,
1105
+ "limit": [
1106
+ null,
1107
+ null,
1108
+ null,
1109
+ null,
1110
+ null,
1111
+ null,
1112
+ null,
1113
+ null,
1114
+ null
1115
+ ],
1116
+ "bootstrap_iters": 0,
1117
+ "gen_kwargs": null
1118
+ },
1119
+ "git_hash": "cf515f2"
1120
+ }
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2024-08-11T19-34-03.542478.json CHANGED
@@ -34,29 +34,29 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6840871354825482,
38
- "all_grouped_npm": 0.5325034905256527,
39
  "all_grouped": {
40
  "enem_challenge": 0.6578026592022393,
41
  "bluex": 0.5591098748261474,
42
  "oab_exams": 0.4733485193621868,
43
- "assin2_rte": 0.896457973466223,
44
  "assin2_sts": 0.6991081985044032,
45
- "faquad_nli": 0.664955263315919,
46
- "hatebr_offensive": 0.8632511993787806,
47
- "portuguese_hate_speech": 0.6911243989372992,
48
- "tweetsentbr": 0.6516261323497358
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6578026592022393,
52
  "harness|bluex|bluex|None|3": 0.5591098748261474,
53
  "harness|oab_exams|oab_exams|None|3": 0.4733485193621868,
54
- "harness|assin2_rte|assin2_rte|None|15": 0.896457973466223,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.6991081985044032,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.664955263315919,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8632511993787806,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6911243989372992,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.6516261323497358
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6578026592022393,
@@ -125,9 +125,9 @@
125
  "main_score": 0.4733485193621868
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
- "f1_macro,all": 0.896457973466223,
129
  "acc,all": 0.8905228758169934,
130
- "main_score": 0.896457973466223
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.6991081985044032,
@@ -135,24 +135,24 @@
135
  "main_score": 0.6991081985044032
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.664955263315919,
139
  "acc,all": 0.6892307692307692,
140
- "main_score": 0.664955263315919
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.8632511993787806,
144
  "acc,all": 0.86,
145
- "main_score": 0.8632511993787806
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.6911243989372992,
149
  "acc,all": 0.7050528789659224,
150
- "main_score": 0.6911243989372992
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.6516261323497358,
154
  "acc,all": 0.7089552238805971,
155
- "main_score": 0.6516261323497358
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.5505868230988251,
38
+ "all_grouped_npm": 0.2808161481879235,
39
  "all_grouped": {
40
  "enem_challenge": 0.6578026592022393,
41
  "bluex": 0.5591098748261474,
42
  "oab_exams": 0.4733485193621868,
43
+ "assin2_rte": 0.597638648977482,
44
  "assin2_sts": 0.6991081985044032,
45
+ "faquad_nli": 0.4433035088772794,
46
+ "hatebr_offensive": 0.5755007995858538,
47
+ "portuguese_hate_speech": 0.46074959929153286,
48
+ "tweetsentbr": 0.4887195992623018
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6578026592022393,
52
  "harness|bluex|bluex|None|3": 0.5591098748261474,
53
  "harness|oab_exams|oab_exams|None|3": 0.4733485193621868,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.597638648977482,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.6991081985044032,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.4433035088772794,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5755007995858538,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.46074959929153286,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.4887195992623018
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6578026592022393,
 
125
  "main_score": 0.4733485193621868
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.597638648977482,
129
  "acc,all": 0.8905228758169934,
130
+ "main_score": 0.597638648977482
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.6991081985044032,
 
135
  "main_score": 0.6991081985044032
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.4433035088772794,
139
  "acc,all": 0.6892307692307692,
140
+ "main_score": 0.4433035088772794
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.5755007995858538,
144
  "acc,all": 0.86,
145
+ "main_score": 0.5755007995858538
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.46074959929153286,
149
  "acc,all": 0.7050528789659224,
150
+ "main_score": 0.46074959929153286
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.4887195992623018,
154
  "acc,all": 0.7089552238805971,
155
+ "main_score": 0.4887195992623018
156
  }
157
  },
158
  "config_tasks": {
princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2/results_2025-05-07T15-51-57.421344.json ADDED
@@ -0,0 +1,208 @@
1
+ {
2
+ "config_general": {
3
+ "start_date": "2025-05-07T15-51-57.421344",
4
+ "start_time": 1746633118.4962575,
5
+ "end_time": 1746634318.4508138,
6
+ "total_evaluation_time_seconds": 1199.9545562267303,
7
+ "n_gpus": 1,
8
+ "model_dtype": "bfloat16",
9
+ "model_is_loaded_in_4bit": null,
10
+ "model_is_loaded_in_8bit": null,
11
+ "model_is_quantized": false,
12
+ "model_quantization": null,
13
+ "model_sha": "9ac0fbee445e7755e50520e9881d67588b4b854c",
14
+ "batch_size": "auto",
15
+ "max_length": 4098,
16
+ "max_gen_toks": 2048,
17
+ "until": null,
18
+ "gen_kwargs": {},
19
+ "effective_batch_size": 7075.0,
20
+ "model_name": "princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2",
21
+ "job_id": 1625,
22
+ "model_id": "princeton-nlp/Llama-3-Instruct-8B-SimPO-v0.2_eval_request_9ac0fbe_False_bfloat16_Original",
23
+ "model_base_model": "",
24
+ "model_weight_type": "Original",
25
+ "model_revision": "9ac0fbee445e7755e50520e9881d67588b4b854c",
26
+ "model_private": false,
27
+ "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
28
+ "model_architectures": "LlamaForCausalLM",
29
+ "submitted_time": "2024-09-21T04:19:04Z",
30
+ "lm_eval_model_type": "vllm",
31
+ "eval_version": "1.1.0"
32
+ },
33
+ "results": {
34
+ "all_grouped_average": 0.6713294718134009,
35
+ "all_grouped_npm": 0.5112432540796013,
36
+ "all_grouped": {
37
+ "enem_challenge": 0.6543037088873338,
38
+ "bluex": 0.5424200278164116,
39
+ "oab_exams": 0.46378132118451026,
40
+ "assin2_rte": 0.8739668044845157,
41
+ "assin2_sts": 0.6849205578626394,
42
+ "faquad_nli": 0.6179372835523507,
43
+ "hatebr_offensive": 0.8419469928644241,
44
+ "portuguese_hate_speech": 0.7047160293321522,
45
+ "tweetsentbr": 0.6579725203362695
46
+ },
47
+ "all": {
48
+ "harness|enem_challenge|enem_challenge|None|3": 0.6543037088873338,
49
+ "harness|bluex|bluex|None|3": 0.5424200278164116,
50
+ "harness|oab_exams|oab_exams|None|3": 0.46378132118451026,
51
+ "harness|assin2_rte|assin2_rte|None|15": 0.8739668044845157,
52
+ "harness|assin2_sts|assin2_sts|None|15": 0.6849205578626394,
53
+ "harness|faquad_nli|faquad_nli|None|15": 0.6179372835523507,
54
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8419469928644241,
55
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7047160293321522,
56
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6579725203362695
57
+ },
58
+ "harness|enem_challenge|enem_challenge|None|3": {
59
+ "acc,all": 0.6543037088873338,
60
+ "acc,exam_id__2010": 0.7008547008547008,
61
+ "acc,exam_id__2015": 0.7226890756302521,
62
+ "acc,exam_id__2012": 0.6379310344827587,
63
+ "acc,exam_id__2009": 0.6956521739130435,
64
+ "acc,exam_id__2016_2": 0.5772357723577236,
65
+ "acc,exam_id__2016": 0.6033057851239669,
66
+ "acc,exam_id__2017": 0.6551724137931034,
67
+ "acc,exam_id__2014": 0.6330275229357798,
68
+ "acc,exam_id__2022": 0.6165413533834586,
69
+ "acc,exam_id__2023": 0.6814814814814815,
70
+ "acc,exam_id__2011": 0.7008547008547008,
71
+ "acc,exam_id__2013": 0.6296296296296297,
72
+ "main_score": 0.6543037088873338
73
+ },
74
+ "harness|bluex|bluex|None|3": {
75
+ "acc,all": 0.5424200278164116,
76
+ "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921,
77
+ "acc,exam_id__UNICAMP_2018": 0.46296296296296297,
78
+ "acc,exam_id__USP_2024": 0.6341463414634146,
79
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
80
+ "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
81
+ "acc,exam_id__USP_2020": 0.6071428571428571,
82
+ "acc,exam_id__UNICAMP_2022": 0.48717948717948717,
83
+ "acc,exam_id__UNICAMP_2024": 0.5555555555555556,
84
+ "acc,exam_id__USP_2021": 0.5769230769230769,
85
+ "acc,exam_id__USP_2018": 0.46296296296296297,
86
+ "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
87
+ "acc,exam_id__USP_2022": 0.5306122448979592,
88
+ "acc,exam_id__USP_2023": 0.6136363636363636,
89
+ "acc,exam_id__USP_2019": 0.5,
90
+ "acc,exam_id__UNICAMP_2019": 0.56,
91
+ "main_score": 0.5424200278164116
92
+ },
93
+ "harness|oab_exams|oab_exams|None|3": {
94
+ "acc,all": 0.46378132118451026,
95
+ "acc,exam_id__2012-08": 0.4625,
96
+ "acc,exam_id__2015-16": 0.4625,
97
+ "acc,exam_id__2014-13": 0.475,
98
+ "acc,exam_id__2011-03": 0.36363636363636365,
99
+ "acc,exam_id__2016-19": 0.5641025641025641,
100
+ "acc,exam_id__2012-07": 0.4875,
101
+ "acc,exam_id__2011-05": 0.45,
102
+ "acc,exam_id__2013-12": 0.5,
103
+ "acc,exam_id__2017-23": 0.4375,
104
+ "acc,exam_id__2016-21": 0.4,
105
+ "acc,exam_id__2016-20": 0.5125,
106
+ "acc,exam_id__2013-10": 0.3875,
107
+ "acc,exam_id__2010-01": 0.3764705882352941,
108
+ "acc,exam_id__2011-04": 0.4125,
109
+ "acc,exam_id__2015-17": 0.5897435897435898,
110
+ "acc,exam_id__2017-22": 0.525,
111
+ "acc,exam_id__2016-20a": 0.3875,
112
+ "acc,exam_id__2017-24": 0.475,
113
+ "acc,exam_id__2015-18": 0.4375,
114
+ "acc,exam_id__2014-14": 0.5375,
115
+ "acc,exam_id__2013-11": 0.45,
116
+ "acc,exam_id__2012-06a": 0.4625,
117
+ "acc,exam_id__2010-02": 0.45,
118
+ "acc,exam_id__2012-06": 0.4125,
119
+ "acc,exam_id__2014-15": 0.6282051282051282,
120
+ "acc,exam_id__2018-25": 0.475,
121
+ "acc,exam_id__2012-09": 0.44155844155844154,
122
+ "main_score": 0.46378132118451026
123
+ },
124
+ "harness|assin2_rte|assin2_rte|None|15": {
125
+ "f1_macro,all": 0.8739668044845157,
126
+ "acc,all": 0.8574346405228758,
127
+ "main_score": 0.8739668044845157
128
+ },
129
+ "harness|assin2_sts|assin2_sts|None|15": {
130
+ "pearson,all": 0.6849205578626394,
131
+ "mse,all": 1.0289630421918505,
132
+ "main_score": 0.6849205578626394
133
+ },
134
+ "harness|faquad_nli|faquad_nli|None|15": {
135
+ "f1_macro,all": 0.6179372835523507,
136
+ "acc,all": 0.5938461538461538,
137
+ "main_score": 0.6179372835523507
138
+ },
139
+ "harness|hatebr_offensive|hatebr_offensive|None|25": {
140
+ "f1_macro,all": 0.8419469928644241,
141
+ "acc,all": 0.8264285714285714,
142
+ "main_score": 0.8419469928644241
143
+ },
144
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
145
+ "f1_macro,all": 0.7047160293321522,
146
+ "acc,all": 0.7038777908343126,
147
+ "main_score": 0.7047160293321522
148
+ },
149
+ "harness|tweetsentbr|tweetsentbr|None|25": {
150
+ "f1_macro,all": 0.6579725203362695,
151
+ "acc,all": 0.7079601990049751,
152
+ "main_score": 0.6579725203362695
153
+ }
154
+ },
155
+ "config_tasks": {
156
+ "harness|enem_challenge|enem_challenge": "LM Harness task",
157
+ "harness|bluex|bluex": "LM Harness task",
158
+ "harness|oab_exams|oab_exams": "LM Harness task",
159
+ "harness|assin2_rte|assin2_rte": "LM Harness task",
160
+ "harness|assin2_sts|assin2_sts": "LM Harness task",
161
+ "harness|faquad_nli|faquad_nli": "LM Harness task",
162
+ "harness|hatebr_offensive|hatebr_offensive": "LM Harness task",
163
+ "harness|portuguese_hate_speech|portuguese_hate_speech": "LM Harness task",
164
+ "harness|tweetsentbr|tweetsentbr": "LM Harness task"
165
+ },
166
+ "versions": {
167
+ "all": 0,
168
+ "harness|enem_challenge|enem_challenge": 1.1,
169
+ "harness|bluex|bluex": 1.1,
170
+ "harness|oab_exams|oab_exams": 1.5,
171
+ "harness|assin2_rte|assin2_rte": 1.1,
172
+ "harness|assin2_sts|assin2_sts": 1.1,
173
+ "harness|faquad_nli|faquad_nli": 1.1,
174
+ "harness|hatebr_offensive|hatebr_offensive": 1.0,
175
+ "harness|portuguese_hate_speech|portuguese_hate_speech": 1.0,
176
+ "harness|tweetsentbr|tweetsentbr": 1.0
177
+ },
178
+ "summary_tasks": {
179
+ "harness|enem_challenge|enem_challenge|None|3": {
180
+ "sample_size": 1429
181
+ },
182
+ "harness|bluex|bluex|None|3": {
183
+ "sample_size": 719
184
+ },
185
+ "harness|oab_exams|oab_exams|None|3": {
186
+ "sample_size": 2195
187
+ },
188
+ "harness|assin2_rte|assin2_rte|None|15": {
189
+ "sample_size": 2448
190
+ },
191
+ "harness|assin2_sts|assin2_sts|None|15": {
192
+ "sample_size": 2448
193
+ },
194
+ "harness|faquad_nli|faquad_nli|None|15": {
195
+ "sample_size": 650
196
+ },
197
+ "harness|hatebr_offensive|hatebr_offensive|None|25": {
198
+ "sample_size": 1400
199
+ },
200
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
201
+ "sample_size": 851
202
+ },
203
+ "harness|tweetsentbr|tweetsentbr|None|25": {
204
+ "sample_size": 2010
205
+ }
206
+ },
207
+ "summary_general": {}
208
+ }