Training in progress, step 1500, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:017f0be8f8d99715ca3ec28b54209784eacd83dd0271af9c31bfde94b646e6eb
 size 14293800
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7913747af857b0bb4337964cb26b9bdbe346330b06cd7241762a40d602d11c01
 size 7580068
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d3005759b561fa9f699a68a943ec85483904a8709ac1b807c326652d75f02a21
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e7592e941fa77449d9f7015724b6a3901e596c11e155596880b96def1f284283
 size 1064
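Note: each checkpoint file above is tracked with Git LFS, so the commit only rewrites the three-line pointer (spec version, "oid sha256:<digest>", and byte size); the binary payload itself lives in LFS storage. As a minimal sketch of how a downloaded blob could be checked against such a pointer (the local paths below are assumptions, not files in this repo):

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a local file against a Git LFS pointer's sha256 oid and size."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False
    sha = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Hypothetical usage, after fetching the raw pointer and the resolved file:
# verify_lfs_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")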
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "miner_id_24/checkpoint-
-  "epoch":
+  "best_metric": 0.15164193511009216,
+  "best_model_checkpoint": "miner_id_24/checkpoint-1500",
+  "epoch": 2.3802439750074385,
   "eval_steps": 300,
-  "global_step":
+  "global_step": 1500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -887,6 +887,224 @@
       "eval_samples_per_second": 52.614,
       "eval_steps_per_second": 26.307,
       "step": 1200
+    },
+    {
+      "epoch": 1.920063473172667,
+      "grad_norm": 0.09009451419115067,
+      "learning_rate": 4.7745751406263165e-06,
+      "loss": 2.4052,
+      "step": 1210
+    },
+    {
+      "epoch": 1.9359317663393831,
+      "grad_norm": 0.17990677058696747,
+      "learning_rate": 4.460970818476718e-06,
+      "loss": 1.4775,
+      "step": 1220
+    },
+    {
+      "epoch": 1.9518000595060994,
+      "grad_norm": 0.12437444925308228,
+      "learning_rate": 4.15700759802175e-06,
+      "loss": 0.324,
+      "step": 1230
+    },
+    {
+      "epoch": 1.9676683526728156,
+      "grad_norm": 10.639348983764648,
+      "learning_rate": 3.862828160801707e-06,
+      "loss": 5.8774,
+      "step": 1240
+    },
+    {
+      "epoch": 1.983536645839532,
+      "grad_norm": 2.456026554107666,
+      "learning_rate": 3.578570595810274e-06,
+      "loss": 1.141,
+      "step": 1250
+    },
+    {
+      "epoch": 1.999404939006248,
+      "grad_norm": 0.1565844863653183,
+      "learning_rate": 3.3043683346749647e-06,
+      "loss": 0.5981,
+      "step": 1260
+    },
+    {
+      "epoch": 2.0152732321729645,
+      "grad_norm": 0.07377956062555313,
+      "learning_rate": 3.040350089023844e-06,
+      "loss": 9.0139,
+      "step": 1270
+    },
+    {
+      "epoch": 2.0311415253396805,
+      "grad_norm": 0.08124233037233353,
+      "learning_rate": 2.786639790067719e-06,
+      "loss": 0.0013,
+      "step": 1280
+    },
+    {
+      "epoch": 2.047009818506397,
+      "grad_norm": 2.749391794204712,
+      "learning_rate": 2.543356530426394e-06,
+      "loss": 3.0041,
+      "step": 1290
+    },
+    {
+      "epoch": 2.062878111673113,
+      "grad_norm": 0.2070658802986145,
+      "learning_rate": 2.310614508226078e-06,
+      "loss": 1.6242,
+      "step": 1300
+    },
+    {
+      "epoch": 2.0787464048398294,
+      "grad_norm": 0.257577121257782,
+      "learning_rate": 2.0885229734943502e-06,
+      "loss": 0.7873,
+      "step": 1310
+    },
+    {
+      "epoch": 2.094614698006546,
+      "grad_norm": 0.10661296546459198,
+      "learning_rate": 1.8771861768777792e-06,
+      "loss": 10.207,
+      "step": 1320
+    },
+    {
+      "epoch": 2.110482991173262,
+      "grad_norm": 0.10888644307851791,
+      "learning_rate": 1.67670332070623e-06,
+      "loss": 0.0016,
+      "step": 1330
+    },
+    {
+      "epoch": 2.1263512843399783,
+      "grad_norm": 0.3325011134147644,
+      "learning_rate": 1.4871685124269008e-06,
+      "loss": 1.2929,
+      "step": 1340
+    },
+    {
+      "epoch": 2.1422195775066943,
+      "grad_norm": 0.17756295204162598,
+      "learning_rate": 1.3086707204299414e-06,
+      "loss": 0.5661,
+      "step": 1350
+    },
+    {
+      "epoch": 2.158087870673411,
+      "grad_norm": 0.14974133670330048,
+      "learning_rate": 1.141293732286297e-06,
+      "loss": 0.1841,
+      "step": 1360
+    },
+    {
+      "epoch": 2.173956163840127,
+      "grad_norm": 0.03758949786424637,
+      "learning_rate": 9.851161154175337e-07,
+      "loss": 9.0455,
+      "step": 1370
+    },
+    {
+      "epoch": 2.1898244570068433,
+      "grad_norm": 0.11239814013242722,
+      "learning_rate": 8.402111802159412e-07,
+      "loss": 0.0012,
+      "step": 1380
+    },
+    {
+      "epoch": 2.2056927501735593,
+      "grad_norm": 8.04785442352295,
+      "learning_rate": 7.06646945632361e-07,
+      "loss": 1.5707,
+      "step": 1390
+    },
+    {
+      "epoch": 2.2215610433402757,
+      "grad_norm": 0.21494823694229126,
+      "learning_rate": 5.844861072478336e-07,
+      "loss": 1.3041,
+      "step": 1400
+    },
+    {
+      "epoch": 2.237429336506992,
+      "grad_norm": 0.06655854731798172,
+      "learning_rate": 4.7378600784402093e-07,
+      "loss": 0.0125,
+      "step": 1410
+    },
+    {
+      "epoch": 2.253297629673708,
+      "grad_norm": 0.04861687868833542,
+      "learning_rate": 3.745986104862903e-07,
+      "loss": 7.9542,
+      "step": 1420
+    },
+    {
+      "epoch": 2.2691659228404246,
+      "grad_norm": 0.28239357471466064,
+      "learning_rate": 2.869704741320478e-07,
+      "loss": 0.0149,
+      "step": 1430
+    },
+    {
+      "epoch": 2.2850342160071406,
+      "grad_norm": 3.703979730606079,
+      "learning_rate": 2.1094273177576507e-07,
+      "loss": 2.4253,
+      "step": 1440
+    },
+    {
+      "epoch": 2.300902509173857,
+      "grad_norm": 12.898768424987793,
+      "learning_rate": 1.4655107114101007e-07,
+      "loss": 1.5493,
+      "step": 1450
+    },
+    {
+      "epoch": 2.316770802340573,
+      "grad_norm": 0.1730128526687622,
+      "learning_rate": 9.382571792846961e-08,
+      "loss": 0.0024,
+      "step": 1460
+    },
+    {
+      "epoch": 2.3326390955072895,
+      "grad_norm": 0.5157439708709717,
+      "learning_rate": 5.279142162789019e-08,
+      "loss": 10.4643,
+      "step": 1470
+    },
+    {
+      "epoch": 2.3485073886740055,
+      "grad_norm": 0.039575349539518356,
+      "learning_rate": 2.3467443900582198e-08,
+      "loss": 0.0022,
+      "step": 1480
+    },
+    {
+      "epoch": 2.364375681840722,
+      "grad_norm": 6.6210455894470215,
+      "learning_rate": 5.86754953789681e-09,
+      "loss": 2.0089,
+      "step": 1490
+    },
+    {
+      "epoch": 2.3802439750074385,
+      "grad_norm": 0.16222581267356873,
+      "learning_rate": 0.0,
+      "loss": 0.4287,
+      "step": 1500
+    },
+    {
+      "epoch": 2.3802439750074385,
+      "eval_loss": 0.15164193511009216,
+      "eval_runtime": 20.031,
+      "eval_samples_per_second": 53.018,
+      "eval_steps_per_second": 26.509,
+      "step": 1500
     }
   ],
   "logging_steps": 10,
@@ -910,12 +1128,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 6.594947874973286e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
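The updated trainer_state.json follows the standard Hugging Face Trainer layout: "log_history" gains one training entry every 10 steps plus an eval entry every 300 steps, the header now records the best eval loss (0.15164193511009216) and the checkpoint it came from (miner_id_24/checkpoint-1500), and "should_training_stop" flips to true because step 1500 is the final step of the run. A minimal sketch of reading the file back, assuming it has been downloaded to a local path (the path below is an assumption):

import json

# Assumed local copy of the checkpoint's trainer state.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 1500
print(state["best_metric"])            # 0.15164193511009216 (eval_loss)
print(state["best_model_checkpoint"])  # miner_id_24/checkpoint-1500

# Separate the periodic eval records from the per-10-step training logs.
eval_points = [
    (entry["step"], entry["eval_loss"])
    for entry in state["log_history"]
    if "eval_loss" in entry
]
print(eval_points[-1])                 # (1500, 0.15164193511009216)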