{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2751347013642096,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.0002,
      "loss": 0.7825,
      "step": 10
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 0.7565,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0002,
      "loss": 0.7408,
      "step": 30
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7496,
      "step": 40
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7424,
      "step": 50
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.7112,
      "step": 60
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 0.7409,
      "step": 70
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.7646,
      "step": 80
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0002,
      "loss": 0.7129,
      "step": 90
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.7671,
      "step": 100
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.7166,
      "step": 110
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.7113,
      "step": 120
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.6682,
      "step": 130
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 0.7644,
      "step": 140
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 0.6813,
      "step": 150
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 0.6447,
      "step": 160
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 0.6587,
      "step": 170
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 0.6657,
      "step": 180
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.681,
      "step": 190
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 0.7142,
      "step": 200
    },
    {
      "epoch": 0.09,
      "eval_loss": 0.6789492964744568,
      "eval_runtime": 280.115,
      "eval_samples_per_second": 3.57,
      "eval_steps_per_second": 0.892,
      "step": 200
    },
    {
      "epoch": 0.09,
      "mmlu_eval_accuracy": 0.4601645000494307,
      "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
      "mmlu_eval_accuracy_astronomy": 0.4375,
      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
      "mmlu_eval_accuracy_college_biology": 0.4375,
      "mmlu_eval_accuracy_college_chemistry": 0.125,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
      "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.375,
      "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.6,
      "mmlu_eval_accuracy_high_school_biology": 0.34375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
      "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.3488372093023256,
      "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
      "mmlu_eval_accuracy_high_school_psychology": 0.75,
      "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
      "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
      "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
      "mmlu_eval_accuracy_human_sexuality": 0.5,
      "mmlu_eval_accuracy_international_law": 0.7692307692307693,
      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
      "mmlu_eval_accuracy_management": 0.5454545454545454,
      "mmlu_eval_accuracy_marketing": 0.68,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
      "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
      "mmlu_eval_accuracy_professional_medicine": 0.41935483870967744,
      "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
      "mmlu_eval_accuracy_public_relations": 0.5,
      "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
      "mmlu_eval_accuracy_sociology": 0.5909090909090909,
      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
      "mmlu_eval_accuracy_virology": 0.3888888888888889,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 0.9642877595465115,
      "step": 200
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.6854,
      "step": 210
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 0.7256,
      "step": 220
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.7505,
      "step": 230
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.618,
      "step": 240
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 0.6726,
      "step": 250
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.6882,
      "step": 260
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0002,
      "loss": 0.6982,
      "step": 270
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 0.661,
      "step": 280
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 0.699,
      "step": 290
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0002,
      "loss": 0.6867,
      "step": 300
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0002,
      "loss": 0.7127,
      "step": 310
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 0.7141,
      "step": 320
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 0.6483,
      "step": 330
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002,
      "loss": 0.6532,
      "step": 340
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002,
      "loss": 0.6474,
      "step": 350
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0002,
      "loss": 0.6728,
      "step": 360
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0002,
      "loss": 0.6736,
      "step": 370
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0002,
      "loss": 0.7164,
      "step": 380
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002,
      "loss": 0.6844,
      "step": 390
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002,
      "loss": 0.6797,
      "step": 400
    },
    {
      "epoch": 0.18,
      "eval_loss": 0.6707000732421875,
      "eval_runtime": 280.204,
      "eval_samples_per_second": 3.569,
      "eval_steps_per_second": 0.892,
      "step": 400
    },
    {
      "epoch": 0.18,
      "mmlu_eval_accuracy": 0.4521886129310749,
      "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
      "mmlu_eval_accuracy_astronomy": 0.375,
      "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
      "mmlu_eval_accuracy_clinical_knowledge": 0.41379310344827586,
      "mmlu_eval_accuracy_college_biology": 0.4375,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
      "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
      "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
      "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
      "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
      "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.375,
      "mmlu_eval_accuracy_elementary_mathematics": 0.36585365853658536,
      "mmlu_eval_accuracy_formal_logic": 0.35714285714285715,
      "mmlu_eval_accuracy_global_facts": 0.4,
      "mmlu_eval_accuracy_high_school_biology": 0.375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_european_history": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
      "mmlu_eval_accuracy_high_school_mathematics": 0.1724137931034483,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.46153846153846156,
      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
      "mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
      "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
      "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
      "mmlu_eval_accuracy_international_law": 0.6923076923076923,
      "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
      "mmlu_eval_accuracy_management": 0.45454545454545453,
      "mmlu_eval_accuracy_marketing": 0.76,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
      "mmlu_eval_accuracy_moral_disputes": 0.42105263157894735,
      "mmlu_eval_accuracy_moral_scenarios": 0.24,
      "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
      "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
      "mmlu_eval_accuracy_professional_law": 0.3411764705882353,
      "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
      "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
      "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
      "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
      "mmlu_eval_accuracy_sociology": 0.6363636363636364,
      "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
      "mmlu_eval_accuracy_virology": 0.5,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 0.92568634446545,
      "step": 400
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0002,
      "loss": 0.7059,
      "step": 410
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0002,
      "loss": 0.79,
      "step": 420
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0002,
      "loss": 0.7714,
      "step": 430
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0002,
      "loss": 0.644,
      "step": 440
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 0.7102,
      "step": 450
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 0.6229,
      "step": 460
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 0.6742,
      "step": 470
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 0.6997,
      "step": 480
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0002,
      "loss": 0.6598,
      "step": 490
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0002,
      "loss": 0.7154,
      "step": 500
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0002,
      "loss": 0.6796,
      "step": 510
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 0.6769,
      "step": 520
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 0.6663,
      "step": 530
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0002,
      "loss": 0.6758,
      "step": 540
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0002,
      "loss": 0.7022,
      "step": 550
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002,
      "loss": 0.706,
      "step": 560
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002,
      "loss": 0.7188,
      "step": 570
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002,
      "loss": 0.7077,
      "step": 580
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0002,
      "loss": 0.6352,
      "step": 590
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002,
      "loss": 0.6203,
      "step": 600
    },
    {
      "epoch": 0.28,
      "eval_loss": 0.664973795413971,
      "eval_runtime": 280.1433,
      "eval_samples_per_second": 3.57,
      "eval_steps_per_second": 0.892,
      "step": 600
    },
    {
      "epoch": 0.28,
      "mmlu_eval_accuracy": 0.44929002048717553,
      "mmlu_eval_accuracy_abstract_algebra": 0.09090909090909091,
      "mmlu_eval_accuracy_anatomy": 0.5714285714285714,
      "mmlu_eval_accuracy_astronomy": 0.375,
      "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
      "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
      "mmlu_eval_accuracy_college_biology": 0.375,
      "mmlu_eval_accuracy_college_chemistry": 0.0,
      "mmlu_eval_accuracy_college_computer_science": 0.45454545454545453,
      "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
      "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
      "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
      "mmlu_eval_accuracy_computer_security": 0.2727272727272727,
      "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
      "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
      "mmlu_eval_accuracy_electrical_engineering": 0.375,
      "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
      "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
      "mmlu_eval_accuracy_global_facts": 0.4,
      "mmlu_eval_accuracy_high_school_biology": 0.375,
      "mmlu_eval_accuracy_high_school_chemistry": 0.4090909090909091,
      "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
      "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
      "mmlu_eval_accuracy_high_school_geography": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_government_and_politics": 0.6666666666666666,
      "mmlu_eval_accuracy_high_school_macroeconomics": 0.32558139534883723,
      "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
      "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
      "mmlu_eval_accuracy_high_school_physics": 0.35294117647058826,
      "mmlu_eval_accuracy_high_school_psychology": 0.7166666666666667,
      "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
      "mmlu_eval_accuracy_high_school_us_history": 0.6818181818181818,
      "mmlu_eval_accuracy_high_school_world_history": 0.5,
      "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
      "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
      "mmlu_eval_accuracy_international_law": 0.6923076923076923,
      "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
      "mmlu_eval_accuracy_logical_fallacies": 0.5555555555555556,
      "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
      "mmlu_eval_accuracy_management": 0.5454545454545454,
      "mmlu_eval_accuracy_marketing": 0.72,
      "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
      "mmlu_eval_accuracy_miscellaneous": 0.686046511627907,
      "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
      "mmlu_eval_accuracy_moral_scenarios": 0.23,
      "mmlu_eval_accuracy_nutrition": 0.5757575757575758,
      "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
      "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
      "mmlu_eval_accuracy_professional_accounting": 0.25806451612903225,
      "mmlu_eval_accuracy_professional_law": 0.3588235294117647,
      "mmlu_eval_accuracy_professional_medicine": 0.3870967741935484,
      "mmlu_eval_accuracy_professional_psychology": 0.4057971014492754,
      "mmlu_eval_accuracy_public_relations": 0.4166666666666667,
      "mmlu_eval_accuracy_security_studies": 0.5555555555555556,
      "mmlu_eval_accuracy_sociology": 0.5909090909090909,
      "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
      "mmlu_eval_accuracy_virology": 0.5,
      "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
      "mmlu_loss": 0.9583329685985575,
      "step": 600
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 3,
  "total_flos": 1.7171969273561088e+17,
  "trial_name": null,
  "trial_params": null
}