{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9843184559710494,
"eval_steps": 500,
"global_step": 102,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019300361881785282,
"grad_norm": 20.085893630981445,
"learning_rate": 0.0,
"loss": 1.5371,
"step": 1
},
{
"epoch": 0.038600723763570564,
"grad_norm": 24.923002243041992,
"learning_rate": 5e-06,
"loss": 1.5356,
"step": 2
},
{
"epoch": 0.05790108564535585,
"grad_norm": 21.43843650817871,
"learning_rate": 7.924812503605782e-06,
"loss": 1.4658,
"step": 3
},
{
"epoch": 0.07720144752714113,
"grad_norm": 10.800460815429688,
"learning_rate": 1e-05,
"loss": 1.1598,
"step": 4
},
{
"epoch": 0.09650180940892641,
"grad_norm": 6.396805286407471,
"learning_rate": 1e-05,
"loss": 1.0447,
"step": 5
},
{
"epoch": 0.1158021712907117,
"grad_norm": 7.415082931518555,
"learning_rate": 1e-05,
"loss": 1.0807,
"step": 6
},
{
"epoch": 0.13510253317249699,
"grad_norm": 6.602322578430176,
"learning_rate": 1e-05,
"loss": 1.0522,
"step": 7
},
{
"epoch": 0.15440289505428226,
"grad_norm": 5.840353012084961,
"learning_rate": 1e-05,
"loss": 0.9866,
"step": 8
},
{
"epoch": 0.17370325693606756,
"grad_norm": 5.387495040893555,
"learning_rate": 1e-05,
"loss": 1.0037,
"step": 9
},
{
"epoch": 0.19300361881785283,
"grad_norm": 4.899038791656494,
"learning_rate": 1e-05,
"loss": 0.9326,
"step": 10
},
{
"epoch": 0.21230398069963813,
"grad_norm": 4.857559680938721,
"learning_rate": 1e-05,
"loss": 0.8738,
"step": 11
},
{
"epoch": 0.2316043425814234,
"grad_norm": 7.552295684814453,
"learning_rate": 1e-05,
"loss": 0.9318,
"step": 12
},
{
"epoch": 0.25090470446320867,
"grad_norm": 72.52302551269531,
"learning_rate": 1e-05,
"loss": 0.9038,
"step": 13
},
{
"epoch": 0.27020506634499397,
"grad_norm": 6.182501316070557,
"learning_rate": 1e-05,
"loss": 0.9649,
"step": 14
},
{
"epoch": 0.28950542822677927,
"grad_norm": 14.671534538269043,
"learning_rate": 1e-05,
"loss": 0.9451,
"step": 15
},
{
"epoch": 0.3088057901085645,
"grad_norm": 7.690091133117676,
"learning_rate": 1e-05,
"loss": 0.9173,
"step": 16
},
{
"epoch": 0.3281061519903498,
"grad_norm": 5.256872653961182,
"learning_rate": 1e-05,
"loss": 0.8687,
"step": 17
},
{
"epoch": 0.3474065138721351,
"grad_norm": 5.03083610534668,
"learning_rate": 1e-05,
"loss": 0.8548,
"step": 18
},
{
"epoch": 0.36670687575392036,
"grad_norm": 6.011650085449219,
"learning_rate": 1e-05,
"loss": 0.9021,
"step": 19
},
{
"epoch": 0.38600723763570566,
"grad_norm": 4.863091945648193,
"learning_rate": 1e-05,
"loss": 0.863,
"step": 20
},
{
"epoch": 0.40530759951749096,
"grad_norm": 6.599653244018555,
"learning_rate": 1e-05,
"loss": 0.8321,
"step": 21
},
{
"epoch": 0.42460796139927626,
"grad_norm": 11.472197532653809,
"learning_rate": 1e-05,
"loss": 0.8921,
"step": 22
},
{
"epoch": 0.4439083232810615,
"grad_norm": 5.892179489135742,
"learning_rate": 1e-05,
"loss": 0.9132,
"step": 23
},
{
"epoch": 0.4632086851628468,
"grad_norm": 5.178156852722168,
"learning_rate": 1e-05,
"loss": 0.891,
"step": 24
},
{
"epoch": 0.4825090470446321,
"grad_norm": 6.018004894256592,
"learning_rate": 1e-05,
"loss": 0.8839,
"step": 25
},
{
"epoch": 0.5018094089264173,
"grad_norm": 4.832773208618164,
"learning_rate": 1e-05,
"loss": 0.8527,
"step": 26
},
{
"epoch": 0.5211097708082026,
"grad_norm": 4.564591884613037,
"learning_rate": 1e-05,
"loss": 0.8524,
"step": 27
},
{
"epoch": 0.5404101326899879,
"grad_norm": 4.709479808807373,
"learning_rate": 1e-05,
"loss": 0.8517,
"step": 28
},
{
"epoch": 0.5597104945717732,
"grad_norm": 5.215322971343994,
"learning_rate": 1e-05,
"loss": 0.8427,
"step": 29
},
{
"epoch": 0.5790108564535585,
"grad_norm": 4.680248737335205,
"learning_rate": 1e-05,
"loss": 0.8777,
"step": 30
},
{
"epoch": 0.5983112183353438,
"grad_norm": 5.544800281524658,
"learning_rate": 1e-05,
"loss": 0.8687,
"step": 31
},
{
"epoch": 0.617611580217129,
"grad_norm": 6.865499496459961,
"learning_rate": 1e-05,
"loss": 0.8827,
"step": 32
},
{
"epoch": 0.6369119420989143,
"grad_norm": 4.069559097290039,
"learning_rate": 1e-05,
"loss": 0.8862,
"step": 33
},
{
"epoch": 0.6562123039806996,
"grad_norm": 5.174818992614746,
"learning_rate": 1e-05,
"loss": 0.8618,
"step": 34
},
{
"epoch": 0.6755126658624849,
"grad_norm": 12.915410041809082,
"learning_rate": 1e-05,
"loss": 0.9145,
"step": 35
},
{
"epoch": 0.6948130277442702,
"grad_norm": 3.8403801918029785,
"learning_rate": 1e-05,
"loss": 0.8393,
"step": 36
},
{
"epoch": 0.7141133896260555,
"grad_norm": 5.092712879180908,
"learning_rate": 1e-05,
"loss": 0.8467,
"step": 37
},
{
"epoch": 0.7334137515078407,
"grad_norm": 3.652506113052368,
"learning_rate": 1e-05,
"loss": 0.8165,
"step": 38
},
{
"epoch": 0.752714113389626,
"grad_norm": 4.251276969909668,
"learning_rate": 1e-05,
"loss": 0.8276,
"step": 39
},
{
"epoch": 0.7720144752714113,
"grad_norm": 4.063849449157715,
"learning_rate": 1e-05,
"loss": 0.8141,
"step": 40
},
{
"epoch": 0.7913148371531966,
"grad_norm": 13.772127151489258,
"learning_rate": 1e-05,
"loss": 0.8257,
"step": 41
},
{
"epoch": 0.8106151990349819,
"grad_norm": 3.2057080268859863,
"learning_rate": 1e-05,
"loss": 0.8811,
"step": 42
},
{
"epoch": 0.8299155609167672,
"grad_norm": 3.1989691257476807,
"learning_rate": 1e-05,
"loss": 0.8089,
"step": 43
},
{
"epoch": 0.8492159227985525,
"grad_norm": 3.2385919094085693,
"learning_rate": 1e-05,
"loss": 0.868,
"step": 44
},
{
"epoch": 0.8685162846803377,
"grad_norm": 3.792576789855957,
"learning_rate": 1e-05,
"loss": 0.8358,
"step": 45
},
{
"epoch": 0.887816646562123,
"grad_norm": 4.059788703918457,
"learning_rate": 1e-05,
"loss": 0.8842,
"step": 46
},
{
"epoch": 0.9071170084439083,
"grad_norm": 5.195308685302734,
"learning_rate": 1e-05,
"loss": 0.8498,
"step": 47
},
{
"epoch": 0.9264173703256936,
"grad_norm": 5.914234638214111,
"learning_rate": 1e-05,
"loss": 0.8596,
"step": 48
},
{
"epoch": 0.9457177322074789,
"grad_norm": 4.043056964874268,
"learning_rate": 1e-05,
"loss": 0.9006,
"step": 49
},
{
"epoch": 0.9650180940892642,
"grad_norm": 3.690598726272583,
"learning_rate": 1e-05,
"loss": 0.8672,
"step": 50
},
{
"epoch": 0.9843184559710495,
"grad_norm": 3.392371416091919,
"learning_rate": 1e-05,
"loss": 0.8641,
"step": 51
},
{
"epoch": 1.0193003618817853,
"grad_norm": 2.909470558166504,
"learning_rate": 1e-05,
"loss": 1.1104,
"step": 52
},
{
"epoch": 1.0386007237635706,
"grad_norm": 3.598850965499878,
"learning_rate": 1e-05,
"loss": 0.7121,
"step": 53
},
{
"epoch": 1.057901085645356,
"grad_norm": 2.9036705493927,
"learning_rate": 1e-05,
"loss": 0.7087,
"step": 54
},
{
"epoch": 1.0772014475271412,
"grad_norm": 4.788527011871338,
"learning_rate": 1e-05,
"loss": 0.7226,
"step": 55
},
{
"epoch": 1.0965018094089265,
"grad_norm": 4.0117573738098145,
"learning_rate": 1e-05,
"loss": 0.7155,
"step": 56
},
{
"epoch": 1.1158021712907118,
"grad_norm": 5.838222980499268,
"learning_rate": 1e-05,
"loss": 0.714,
"step": 57
},
{
"epoch": 1.135102533172497,
"grad_norm": 4.493499279022217,
"learning_rate": 1e-05,
"loss": 0.6922,
"step": 58
},
{
"epoch": 1.1544028950542822,
"grad_norm": 8.510027885437012,
"learning_rate": 1e-05,
"loss": 0.7162,
"step": 59
},
{
"epoch": 1.1737032569360675,
"grad_norm": 4.332089900970459,
"learning_rate": 1e-05,
"loss": 0.6684,
"step": 60
},
{
"epoch": 1.1930036188178528,
"grad_norm": 6.2718634605407715,
"learning_rate": 1e-05,
"loss": 0.6902,
"step": 61
},
{
"epoch": 1.212303980699638,
"grad_norm": 4.732446670532227,
"learning_rate": 1e-05,
"loss": 0.6815,
"step": 62
},
{
"epoch": 1.2316043425814234,
"grad_norm": 3.6639814376831055,
"learning_rate": 1e-05,
"loss": 0.679,
"step": 63
},
{
"epoch": 1.2509047044632087,
"grad_norm": 3.1242263317108154,
"learning_rate": 1e-05,
"loss": 0.6972,
"step": 64
},
{
"epoch": 1.270205066344994,
"grad_norm": 3.297794818878174,
"learning_rate": 1e-05,
"loss": 0.6767,
"step": 65
},
{
"epoch": 1.2895054282267793,
"grad_norm": 3.250779151916504,
"learning_rate": 1e-05,
"loss": 0.6771,
"step": 66
},
{
"epoch": 1.3088057901085646,
"grad_norm": 2.957329273223877,
"learning_rate": 1e-05,
"loss": 0.6964,
"step": 67
},
{
"epoch": 1.3281061519903499,
"grad_norm": 4.555089473724365,
"learning_rate": 1e-05,
"loss": 0.679,
"step": 68
},
{
"epoch": 1.3474065138721352,
"grad_norm": 8.270596504211426,
"learning_rate": 1e-05,
"loss": 0.7211,
"step": 69
},
{
"epoch": 1.3667068757539202,
"grad_norm": 4.109578609466553,
"learning_rate": 1e-05,
"loss": 0.6777,
"step": 70
},
{
"epoch": 1.3860072376357055,
"grad_norm": 3.5171566009521484,
"learning_rate": 1e-05,
"loss": 0.7172,
"step": 71
},
{
"epoch": 1.4053075995174908,
"grad_norm": 3.425497055053711,
"learning_rate": 1e-05,
"loss": 0.693,
"step": 72
},
{
"epoch": 1.4246079613992761,
"grad_norm": 4.035482883453369,
"learning_rate": 1e-05,
"loss": 0.7502,
"step": 73
},
{
"epoch": 1.4439083232810614,
"grad_norm": 7.8259406089782715,
"learning_rate": 1e-05,
"loss": 0.7002,
"step": 74
},
{
"epoch": 1.4632086851628467,
"grad_norm": 6.383781433105469,
"learning_rate": 1e-05,
"loss": 0.733,
"step": 75
},
{
"epoch": 1.482509047044632,
"grad_norm": 4.586795806884766,
"learning_rate": 1e-05,
"loss": 0.6888,
"step": 76
},
{
"epoch": 1.5018094089264173,
"grad_norm": 3.248703718185425,
"learning_rate": 1e-05,
"loss": 0.7034,
"step": 77
},
{
"epoch": 1.5211097708082026,
"grad_norm": 5.46513032913208,
"learning_rate": 1e-05,
"loss": 0.7176,
"step": 78
},
{
"epoch": 1.540410132689988,
"grad_norm": 6.007761001586914,
"learning_rate": 1e-05,
"loss": 0.6983,
"step": 79
},
{
"epoch": 1.5597104945717732,
"grad_norm": 3.7299880981445312,
"learning_rate": 1e-05,
"loss": 0.72,
"step": 80
},
{
"epoch": 1.5790108564535585,
"grad_norm": 3.501554250717163,
"learning_rate": 1e-05,
"loss": 0.727,
"step": 81
},
{
"epoch": 1.5983112183353438,
"grad_norm": 3.446808099746704,
"learning_rate": 1e-05,
"loss": 0.717,
"step": 82
},
{
"epoch": 1.6176115802171291,
"grad_norm": 3.2995755672454834,
"learning_rate": 1e-05,
"loss": 0.6923,
"step": 83
},
{
"epoch": 1.6369119420989144,
"grad_norm": 7.1533098220825195,
"learning_rate": 1e-05,
"loss": 0.6898,
"step": 84
},
{
"epoch": 1.6562123039806997,
"grad_norm": 3.07770037651062,
"learning_rate": 1e-05,
"loss": 0.6799,
"step": 85
},
{
"epoch": 1.675512665862485,
"grad_norm": 3.425074338912964,
"learning_rate": 1e-05,
"loss": 0.7234,
"step": 86
},
{
"epoch": 1.6948130277442703,
"grad_norm": 3.45398211479187,
"learning_rate": 1e-05,
"loss": 0.7196,
"step": 87
},
{
"epoch": 1.7141133896260556,
"grad_norm": 4.230635643005371,
"learning_rate": 1e-05,
"loss": 0.7123,
"step": 88
},
{
"epoch": 1.7334137515078407,
"grad_norm": 3.4547479152679443,
"learning_rate": 1e-05,
"loss": 0.7317,
"step": 89
},
{
"epoch": 1.752714113389626,
"grad_norm": 3.5444135665893555,
"learning_rate": 1e-05,
"loss": 0.7076,
"step": 90
},
{
"epoch": 1.7720144752714113,
"grad_norm": 3.361970901489258,
"learning_rate": 1e-05,
"loss": 0.7349,
"step": 91
},
{
"epoch": 1.7913148371531966,
"grad_norm": 4.442152976989746,
"learning_rate": 1e-05,
"loss": 0.6986,
"step": 92
},
{
"epoch": 1.810615199034982,
"grad_norm": 4.054971694946289,
"learning_rate": 1e-05,
"loss": 0.7092,
"step": 93
},
{
"epoch": 1.8299155609167672,
"grad_norm": 2.9335885047912598,
"learning_rate": 1e-05,
"loss": 0.7208,
"step": 94
},
{
"epoch": 1.8492159227985525,
"grad_norm": 4.029760360717773,
"learning_rate": 1e-05,
"loss": 0.6943,
"step": 95
},
{
"epoch": 1.8685162846803376,
"grad_norm": 4.110034942626953,
"learning_rate": 1e-05,
"loss": 0.7137,
"step": 96
},
{
"epoch": 1.887816646562123,
"grad_norm": 4.746143341064453,
"learning_rate": 1e-05,
"loss": 0.7243,
"step": 97
},
{
"epoch": 1.9071170084439082,
"grad_norm": 2.9600183963775635,
"learning_rate": 1e-05,
"loss": 0.7038,
"step": 98
},
{
"epoch": 1.9264173703256935,
"grad_norm": 2.582796096801758,
"learning_rate": 1e-05,
"loss": 0.6802,
"step": 99
},
{
"epoch": 1.9457177322074788,
"grad_norm": 4.347536087036133,
"learning_rate": 1e-05,
"loss": 0.7,
"step": 100
},
{
"epoch": 1.965018094089264,
"grad_norm": 3.207463502883911,
"learning_rate": 1e-05,
"loss": 0.7254,
"step": 101
},
{
"epoch": 1.9843184559710494,
"grad_norm": 3.204589605331421,
"learning_rate": 1e-05,
"loss": 0.7292,
"step": 102
},
{
"epoch": 1.9843184559710494,
"step": 102,
"total_flos": 5.52152723399639e+16,
"train_loss": 0.8226426015881931,
"train_runtime": 11731.6659,
"train_samples_per_second": 1.13,
"train_steps_per_second": 0.009
}
],
"logging_steps": 1.0,
"max_steps": 102,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.52152723399639e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}