Dataset Viewer
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'test' of the config 'default' of the dataset.
Error code: FeaturesError Exception: ArrowInvalid Message: Schema at index 1 was different: config_general: struct<start_date: string, start_time: double, end_time: double, total_evaluation_time_seconds: double, has_chat_template: bool, chat_type: string, n_gpus: int64, accelerate_num_process: null, model_sha: string, model_dtype: string, model_memory_footprint: int64, model_num_parameters: int64, model_is_loaded_in_4bit: null, model_is_loaded_in_8bit: null, model_is_quantized: null, model_device: string, batch_size: int64, max_length: int64, max_ctx_length: int64, max_gen_toks: int64, model_name: string, job_id: int64, model_id: string, model_base_model: string, model_weight_type: string, model_revision: string, model_private: bool, model_type: string, model_architectures: string, submitted_time: timestamp[s], lm_eval_model_type: string, eval_version: string> results: struct<all_grouped_average: double, all_grouped_npm: double, all_grouped: struct<enem_challenge: double, bluex: double, oab_exams: double, assin2_rte: double, assin2_sts: double, faquad_nli: double, hatebr_offensive: double, portuguese_hate_speech: double, tweetsentbr: double>, all: struct<harness|enem_challenge|enem_challenge|None|3: double, harness|bluex|bluex|None|3: double, harness|oab_exams|oab_exams|None|3: double, harness|assin2_rte|assin2_rte|None|15: double, harness|assin2_sts|assin2_sts|None|15: double, harness|faquad_nli|faquad_nli|None|15: double, harness|hatebr_offensive|hatebr_offensive|None|25: double, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: double, harness|tweetsentbr|tweetsentbr|None|25: double>, harness|enem_challenge|enem_challenge|None|3: struct<acc,all: double, acc,exam_id__2012: double, acc,exam_id__2014: double, acc,exam_id__2015: double, acc,exam_id__2016: double, acc,exam_id__2017: double, acc,exam_id__2009: double, acc,exam_id__2010: double, acc,exam_id__2023: double, acc,exam_id__2022: double, acc,exam_id__2016_2: double, acc,exam_id__2013: double, 
acc,exam_id__2011: double, main_score: double>, harness|bluex|bluex|None|3: struct<acc,all: double, acc,exam_id__UNICAMP_2021_2: double, acc,exam_id__USP_2019: double, acc,exam_id__USP_2021: double, acc,exam_id__UNICAMP_2020: double, acc,exam_id__USP_2020: double, acc,exam_id__USP_2018: double, acc,exam_id__USP_2024: double, acc,exam_id__UNICAMP_2019: double, acc,exam_id__USP_2022: double, acc,exam_id__UNICAMP_2018: double, acc,exam_id__UNICAMP_2022: double, acc,exam_id__UNICAMP_2024: double, acc,exam_id__UNICAMP_2021_1: double, acc,exam_id__USP_2023: double, acc,exam_id__UNICAMP_2023: double, main_score: double>, harness|oab_exams|oab_exams|None|3: struct<acc,all: double, acc,exam_id__2014-13: double, acc,exam_id__2015-17: double, acc,exam_id__2017-24: double, acc,exam_id__2016-21: double, acc,exam_id__2013-11: double, acc,exam_id__2010-02: double, acc,exam_id__2016-20: double, acc,exam_id__2016-19: double, acc,exam_id__2011-03: double, acc,exam_id__2017-23: double, acc,exam_id__2014-15: double, acc,exam_id__2012-07: double, acc,exam_id__2018-25: double, acc,exam_id__2012-06a: double, acc,exam_id__2013-12: double, acc,exam_id__2010-01: double, acc,exam_id__2016-20a: double, acc,exam_id__2011-05: double, acc,exam_id__2012-08: double, acc,exam_id__2015-16: double, acc,exam_id__2011-04: double, acc,exam_id__2014-14: double, acc,exam_id__2017-22: double, acc,exam_id__2013-10: double, acc,exam_id__2012-09: double, acc,exam_id__2012-06: double, acc,exam_id__2015-18: double, main_score: double>, harness|assin2_rte|assin2_rte|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|assin2_sts|assin2_sts|None|15: struct<pearson,all: double, mse,all: double, main_score: double>, harness|faquad_nli|faquad_nli|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, 
harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>> config_tasks: struct<harness|enem_challenge|enem_challenge: string, harness|bluex|bluex: string, harness|oab_exams|oab_exams: string, harness|assin2_rte|assin2_rte: string, harness|assin2_sts|assin2_sts: string, harness|faquad_nli|faquad_nli: string, harness|hatebr_offensive|hatebr_offensive: string, harness|portuguese_hate_speech|portuguese_hate_speech: string, harness|tweetsentbr|tweetsentbr: string> versions: struct<all: int64, harness|enem_challenge|enem_challenge: double, harness|bluex|bluex: double, harness|oab_exams|oab_exams: double, harness|assin2_rte|assin2_rte: double, harness|assin2_sts|assin2_sts: double, harness|faquad_nli|faquad_nli: double, harness|hatebr_offensive|hatebr_offensive: double, harness|portuguese_hate_speech|portuguese_hate_speech: double, harness|tweetsentbr|tweetsentbr: double> summary_tasks: struct<harness|enem_challenge|enem_challenge|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|bluex|bluex|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|oab_exams|oab_exams|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: 
int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_rte|assin2_rte|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_sts|assin2_sts|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|faquad_nli|faquad_nli|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, 
harness|tweetsentbr|tweetsentbr|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>> summary_general: struct<truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64> vs config_general: struct<start_date: string, start_time: double, end_time: double, total_evaluation_time_seconds: double, has_chat_template: bool, chat_type: string, n_gpus: int64, accelerate_num_process: null, model_sha: string, model_dtype: string, model_memory_footprint: int64, model_num_parameters: int64, model_is_loaded_in_4bit: null, model_is_loaded_in_8bit: null, model_is_quantized: null, model_device: string, batch_size: int64, max_length: int64, max_ctx_length: int64, max_gen_toks: int64, model_name: string, job_id: int64, model_id: string, model_base_model: string, model_weight_type: string, model_revision: string, model_private: bool, model_type: string, model_architectures: string, submitted_time: timestamp[s], lm_eval_model_type: string, eval_version: string> results: struct<all_grouped_average: double, all_grouped_npm: double, all_grouped: struct<enem_challenge: double, bluex: double, oab_exams: double, assin2_rte: double, assin2_sts: double, faquad_nli: double, hatebr_offensive: double, portuguese_hate_speech: double, tweetsentbr: double>, all: struct<harness|enem_challenge|enem_challenge|None|3: double, harness|bluex|bluex|None|3: double, harness|oab_exams|oab_exams|None|3: double, harness|assin2_rte|assin2_rte|None|15: double, harness|assin2_sts|assin2_sts|None|15: double, harness|faquad_nli|faquad_nli|None|15: double, harness|hatebr_offensive|hatebr_offensive|None|25: double, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: double, 
harness|tweetsentbr|tweetsentbr|None|25: double>, harness|enem_challenge|enem_challenge|None|3: struct<acc,all: double, acc,exam_id__2012: double, acc,exam_id__2009: double, acc,exam_id__2017: double, acc,exam_id__2016: double, acc,exam_id__2022: double, acc,exam_id__2015: double, acc,exam_id__2023: double, acc,exam_id__2016_2: double, acc,exam_id__2014: double, acc,exam_id__2010: double, acc,exam_id__2011: double, acc,exam_id__2013: double, main_score: double>, harness|bluex|bluex|None|3: struct<acc,all: double, acc,exam_id__UNICAMP_2021_1: double, acc,exam_id__UNICAMP_2021_2: double, acc,exam_id__USP_2020: double, acc,exam_id__UNICAMP_2022: double, acc,exam_id__USP_2022: double, acc,exam_id__USP_2021: double, acc,exam_id__UNICAMP_2019: double, acc,exam_id__USP_2023: double, acc,exam_id__USP_2024: double, acc,exam_id__USP_2019: double, acc,exam_id__UNICAMP_2020: double, acc,exam_id__UNICAMP_2023: double, acc,exam_id__UNICAMP_2024: double, acc,exam_id__USP_2018: double, acc,exam_id__UNICAMP_2018: double, main_score: double>, harness|oab_exams|oab_exams|None|3: struct<acc,all: double, acc,exam_id__2012-06: double, acc,exam_id__2013-10: double, acc,exam_id__2017-22: double, acc,exam_id__2012-09: double, acc,exam_id__2011-05: double, acc,exam_id__2011-04: double, acc,exam_id__2011-03: double, acc,exam_id__2016-20: double, acc,exam_id__2012-07: double, acc,exam_id__2010-02: double, acc,exam_id__2012-06a: double, acc,exam_id__2015-16: double, acc,exam_id__2016-20a: double, acc,exam_id__2015-17: double, acc,exam_id__2013-11: double, acc,exam_id__2016-19: double, acc,exam_id__2014-15: double, acc,exam_id__2015-18: double, acc,exam_id__2017-24: double, acc,exam_id__2012-08: double, acc,exam_id__2010-01: double, acc,exam_id__2014-13: double, acc,exam_id__2017-23: double, acc,exam_id__2013-12: double, acc,exam_id__2016-21: double, acc,exam_id__2018-25: double, acc,exam_id__2014-14: double, main_score: double>, harness|assin2_rte|assin2_rte|None|15: struct<f1_macro,all: 
double, acc,all: double, main_score: double>, harness|assin2_sts|assin2_sts|None|15: struct<pearson,all: double, mse,all: double, main_score: double>, harness|faquad_nli|faquad_nli|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>> config_tasks: struct<harness|enem_challenge|enem_challenge: string, harness|bluex|bluex: string, harness|oab_exams|oab_exams: string, harness|assin2_rte|assin2_rte: string, harness|assin2_sts|assin2_sts: string, harness|faquad_nli|faquad_nli: string, harness|hatebr_offensive|hatebr_offensive: string, harness|portuguese_hate_speech|portuguese_hate_speech: string, harness|tweetsentbr|tweetsentbr: string> versions: struct<all: int64, harness|enem_challenge|enem_challenge: double, harness|bluex|bluex: double, harness|oab_exams|oab_exams: double, harness|assin2_rte|assin2_rte: double, harness|assin2_sts|assin2_sts: double, harness|faquad_nli|faquad_nli: double, harness|hatebr_offensive|hatebr_offensive: double, harness|portuguese_hate_speech|portuguese_hate_speech: double, harness|tweetsentbr|tweetsentbr: double> summary_tasks: struct<harness|enem_challenge|enem_challenge|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|bluex|bluex|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, 
min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|oab_exams|oab_exams|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_rte|assin2_rte|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_sts|assin2_sts|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|faquad_nli|faquad_nli|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, 
harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>> summary_general: struct<truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64> Traceback: Traceback (most recent call last): File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 228, in compute_first_rows_from_streaming_response iterable_dataset = iterable_dataset._resolve_features() File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 3339, in _resolve_features features = _infer_features_from_batch(self.with_format(None)._head()) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2096, in _head return next(iter(self.iter(batch_size=n))) File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 2300, in iter for key, example in iterator: File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1856, in __iter__ for key, pa_table in self._iter_arrow(): File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 1878, in _iter_arrow yield from self.ex_iterable._iter_arrow() File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/iterable_dataset.py", line 
536, in _iter_arrow yield new_key, pa.Table.from_batches(chunks_buffer) File "pyarrow/table.pxi", line 4116, in pyarrow.lib.Table.from_batches File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status pyarrow.lib.ArrowInvalid: Schema at index 1 was different: config_general: struct<start_date: string, start_time: double, end_time: double, total_evaluation_time_seconds: double, has_chat_template: bool, chat_type: string, n_gpus: int64, accelerate_num_process: null, model_sha: string, model_dtype: string, model_memory_footprint: int64, model_num_parameters: int64, model_is_loaded_in_4bit: null, model_is_loaded_in_8bit: null, model_is_quantized: null, model_device: string, batch_size: int64, max_length: int64, max_ctx_length: int64, max_gen_toks: int64, model_name: string, job_id: int64, model_id: string, model_base_model: string, model_weight_type: string, model_revision: string, model_private: bool, model_type: string, model_architectures: string, submitted_time: timestamp[s], lm_eval_model_type: string, eval_version: string> results: struct<all_grouped_average: double, all_grouped_npm: double, all_grouped: struct<enem_challenge: double, bluex: double, oab_exams: double, assin2_rte: double, assin2_sts: double, faquad_nli: double, hatebr_offensive: double, portuguese_hate_speech: double, tweetsentbr: double>, all: struct<harness|enem_challenge|enem_challenge|None|3: double, harness|bluex|bluex|None|3: double, harness|oab_exams|oab_exams|None|3: double, harness|assin2_rte|assin2_rte|None|15: double, harness|assin2_sts|assin2_sts|None|15: double, harness|faquad_nli|faquad_nli|None|15: double, harness|hatebr_offensive|hatebr_offensive|None|25: double, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: double, harness|tweetsentbr|tweetsentbr|None|25: double>, harness|enem_challenge|enem_challenge|None|3: struct<acc,all: double, acc,exam_id__2012: double, acc,exam_id__2014: 
double, acc,exam_id__2015: double, acc,exam_id__2016: double, acc,exam_id__2017: double, acc,exam_id__2009: double, acc,exam_id__2010: double, acc,exam_id__2023: double, acc,exam_id__2022: double, acc,exam_id__2016_2: double, acc,exam_id__2013: double, acc,exam_id__2011: double, main_score: double>, harness|bluex|bluex|None|3: struct<acc,all: double, acc,exam_id__UNICAMP_2021_2: double, acc,exam_id__USP_2019: double, acc,exam_id__USP_2021: double, acc,exam_id__UNICAMP_2020: double, acc,exam_id__USP_2020: double, acc,exam_id__USP_2018: double, acc,exam_id__USP_2024: double, acc,exam_id__UNICAMP_2019: double, acc,exam_id__USP_2022: double, acc,exam_id__UNICAMP_2018: double, acc,exam_id__UNICAMP_2022: double, acc,exam_id__UNICAMP_2024: double, acc,exam_id__UNICAMP_2021_1: double, acc,exam_id__USP_2023: double, acc,exam_id__UNICAMP_2023: double, main_score: double>, harness|oab_exams|oab_exams|None|3: struct<acc,all: double, acc,exam_id__2014-13: double, acc,exam_id__2015-17: double, acc,exam_id__2017-24: double, acc,exam_id__2016-21: double, acc,exam_id__2013-11: double, acc,exam_id__2010-02: double, acc,exam_id__2016-20: double, acc,exam_id__2016-19: double, acc,exam_id__2011-03: double, acc,exam_id__2017-23: double, acc,exam_id__2014-15: double, acc,exam_id__2012-07: double, acc,exam_id__2018-25: double, acc,exam_id__2012-06a: double, acc,exam_id__2013-12: double, acc,exam_id__2010-01: double, acc,exam_id__2016-20a: double, acc,exam_id__2011-05: double, acc,exam_id__2012-08: double, acc,exam_id__2015-16: double, acc,exam_id__2011-04: double, acc,exam_id__2014-14: double, acc,exam_id__2017-22: double, acc,exam_id__2013-10: double, acc,exam_id__2012-09: double, acc,exam_id__2012-06: double, acc,exam_id__2015-18: double, main_score: double>, harness|assin2_rte|assin2_rte|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|assin2_sts|assin2_sts|None|15: struct<pearson,all: double, mse,all: double, main_score: double>, 
harness|faquad_nli|faquad_nli|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>> config_tasks: struct<harness|enem_challenge|enem_challenge: string, harness|bluex|bluex: string, harness|oab_exams|oab_exams: string, harness|assin2_rte|assin2_rte: string, harness|assin2_sts|assin2_sts: string, harness|faquad_nli|faquad_nli: string, harness|hatebr_offensive|hatebr_offensive: string, harness|portuguese_hate_speech|portuguese_hate_speech: string, harness|tweetsentbr|tweetsentbr: string> versions: struct<all: int64, harness|enem_challenge|enem_challenge: double, harness|bluex|bluex: double, harness|oab_exams|oab_exams: double, harness|assin2_rte|assin2_rte: double, harness|assin2_sts|assin2_sts: double, harness|faquad_nli|faquad_nli: double, harness|hatebr_offensive|hatebr_offensive: double, harness|portuguese_hate_speech|portuguese_hate_speech: double, harness|tweetsentbr|tweetsentbr: double> summary_tasks: struct<harness|enem_challenge|enem_challenge|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|bluex|bluex|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, 
mean_effective_fewshot_size: double>, harness|oab_exams|oab_exams|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_rte|assin2_rte|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_sts|assin2_sts|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|faquad_nli|faquad_nli|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, 
fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>> summary_general: struct<truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64> vs config_general: struct<start_date: string, start_time: double, end_time: double, total_evaluation_time_seconds: double, has_chat_template: bool, chat_type: string, n_gpus: int64, accelerate_num_process: null, model_sha: string, model_dtype: string, model_memory_footprint: int64, model_num_parameters: int64, model_is_loaded_in_4bit: null, model_is_loaded_in_8bit: null, model_is_quantized: null, model_device: string, batch_size: int64, max_length: int64, max_ctx_length: int64, max_gen_toks: int64, model_name: string, job_id: int64, model_id: string, model_base_model: string, model_weight_type: string, model_revision: string, model_private: bool, model_type: string, model_architectures: string, submitted_time: timestamp[s], lm_eval_model_type: string, eval_version: string> results: struct<all_grouped_average: double, all_grouped_npm: double, all_grouped: struct<enem_challenge: double, bluex: double, oab_exams: double, assin2_rte: double, assin2_sts: double, faquad_nli: double, hatebr_offensive: double, portuguese_hate_speech: double, tweetsentbr: double>, all: struct<harness|enem_challenge|enem_challenge|None|3: double, harness|bluex|bluex|None|3: double, harness|oab_exams|oab_exams|None|3: double, harness|assin2_rte|assin2_rte|None|15: double, 
harness|assin2_sts|assin2_sts|None|15: double, harness|faquad_nli|faquad_nli|None|15: double, harness|hatebr_offensive|hatebr_offensive|None|25: double, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: double, harness|tweetsentbr|tweetsentbr|None|25: double>, harness|enem_challenge|enem_challenge|None|3: struct<acc,all: double, acc,exam_id__2012: double, acc,exam_id__2009: double, acc,exam_id__2017: double, acc,exam_id__2016: double, acc,exam_id__2022: double, acc,exam_id__2015: double, acc,exam_id__2023: double, acc,exam_id__2016_2: double, acc,exam_id__2014: double, acc,exam_id__2010: double, acc,exam_id__2011: double, acc,exam_id__2013: double, main_score: double>, harness|bluex|bluex|None|3: struct<acc,all: double, acc,exam_id__UNICAMP_2021_1: double, acc,exam_id__UNICAMP_2021_2: double, acc,exam_id__USP_2020: double, acc,exam_id__UNICAMP_2022: double, acc,exam_id__USP_2022: double, acc,exam_id__USP_2021: double, acc,exam_id__UNICAMP_2019: double, acc,exam_id__USP_2023: double, acc,exam_id__USP_2024: double, acc,exam_id__USP_2019: double, acc,exam_id__UNICAMP_2020: double, acc,exam_id__UNICAMP_2023: double, acc,exam_id__UNICAMP_2024: double, acc,exam_id__USP_2018: double, acc,exam_id__UNICAMP_2018: double, main_score: double>, harness|oab_exams|oab_exams|None|3: struct<acc,all: double, acc,exam_id__2012-06: double, acc,exam_id__2013-10: double, acc,exam_id__2017-22: double, acc,exam_id__2012-09: double, acc,exam_id__2011-05: double, acc,exam_id__2011-04: double, acc,exam_id__2011-03: double, acc,exam_id__2016-20: double, acc,exam_id__2012-07: double, acc,exam_id__2010-02: double, acc,exam_id__2012-06a: double, acc,exam_id__2015-16: double, acc,exam_id__2016-20a: double, acc,exam_id__2015-17: double, acc,exam_id__2013-11: double, acc,exam_id__2016-19: double, acc,exam_id__2014-15: double, acc,exam_id__2015-18: double, acc,exam_id__2017-24: double, acc,exam_id__2012-08: double, acc,exam_id__2010-01: double, acc,exam_id__2014-13: double, 
acc,exam_id__2017-23: double, acc,exam_id__2013-12: double, acc,exam_id__2016-21: double, acc,exam_id__2018-25: double, acc,exam_id__2014-14: double, main_score: double>, harness|assin2_rte|assin2_rte|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|assin2_sts|assin2_sts|None|15: struct<pearson,all: double, mse,all: double, main_score: double>, harness|faquad_nli|faquad_nli|None|15: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<f1_macro,all: double, acc,all: double, main_score: double>> config_tasks: struct<harness|enem_challenge|enem_challenge: string, harness|bluex|bluex: string, harness|oab_exams|oab_exams: string, harness|assin2_rte|assin2_rte: string, harness|assin2_sts|assin2_sts: string, harness|faquad_nli|faquad_nli: string, harness|hatebr_offensive|hatebr_offensive: string, harness|portuguese_hate_speech|portuguese_hate_speech: string, harness|tweetsentbr|tweetsentbr: string> versions: struct<all: int64, harness|enem_challenge|enem_challenge: double, harness|bluex|bluex: double, harness|oab_exams|oab_exams: double, harness|assin2_rte|assin2_rte: double, harness|assin2_sts|assin2_sts: double, harness|faquad_nli|faquad_nli: double, harness|hatebr_offensive|hatebr_offensive: double, harness|portuguese_hate_speech|portuguese_hate_speech: double, harness|tweetsentbr|tweetsentbr: double> summary_tasks: struct<harness|enem_challenge|enem_challenge|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, 
mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|bluex|bluex|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|oab_exams|oab_exams|None|3: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_rte|assin2_rte|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|assin2_sts|assin2_sts|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|faquad_nli|faquad_nli|None|15: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|hatebr_offensive|hatebr_offensive|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, 
fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|portuguese_hate_speech|portuguese_hate_speech|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>, harness|tweetsentbr|tweetsentbr|None|25: struct<sample_size: int64, truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64, mean_seq_length: double, min_seq_length: int64, max_seq_length: int64, max_ctx_length: int64, max_gen_toks: int64, mean_original_fewshots_size: double, mean_effective_fewshot_size: double>> summary_general: struct<truncated: int64, non_truncated: int64, padded: int64, non_padded: int64, fewshots_truncated: int64>
Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
No dataset card yet
Downloads last month: 229