shulijia committed 74d1073 (verified) · 1 Parent(s): 0f27b2a

Training in progress, step 1500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:53cd5e9c47598630429a0f3802a2cdb8986ab1df4396085d2e9cb28ab20196d2
+ oid sha256:782e8ac1da50805a0148b309fba266f389439b051c131233056ebbdcad3f58ee
  size 2384234968
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8acd0aae640672ec13140a99f0376026f7718d96d3224b6a8c7639108b56103
+ oid sha256:c664d5bbe4243e2d8db79e79e6b2516a49619d23a025b2135048a5763a753ef7
  size 4768663315
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de69a2834426ff9ef8199d077e00892579278af31d8969d77f98235b5cfc010a
+ oid sha256:2534e434cd5abbb8f7668d3eab0549db0ef95d6a797a3efa86b712e8e32266a7
  size 14645
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a217269145a96df9b63b449d5f2df3bd2a63aa118b7645f02b3a7f2873da81a5
+ oid sha256:83e59f17c2c0681a7d8c091fe9bc022de98a5c7804170d371d6b77c623b084c5
  size 1465
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.9658224735677403,
+ "epoch": 2.9481190066388,
  "eval_steps": 100,
- "global_step": 1000,
+ "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -908,6 +908,456 @@
  "mean_token_accuracy": 0.935359588265419,
  "num_tokens": 8186880.0,
  "step": 1000
+ },
+ {
+ "epoch": 1.985492992377674,
+ "grad_norm": 0.6394369602203369,
+ "learning_rate": 3.770014556040757e-06,
+ "loss": 0.0759,
+ "mean_token_accuracy": 0.9349559672176838,
+ "num_tokens": 8268800.0,
+ "step": 1010
+ },
+ {
+ "epoch": 2.0039341037619867,
+ "grad_norm": 0.7817333340644836,
+ "learning_rate": 3.6972343522561864e-06,
+ "loss": 0.0759,
+ "mean_token_accuracy": 0.935172860622406,
+ "num_tokens": 8345600.0,
+ "step": 1020
+ },
+ {
+ "epoch": 2.0236046225719204,
+ "grad_norm": 0.8923552632331848,
+ "learning_rate": 3.624454148471616e-06,
+ "loss": 0.0694,
+ "mean_token_accuracy": 0.937181992828846,
+ "num_tokens": 8427520.0,
+ "step": 1030
+ },
+ {
+ "epoch": 2.0432751413818537,
+ "grad_norm": 0.7769061326980591,
+ "learning_rate": 3.551673944687045e-06,
+ "loss": 0.0723,
+ "mean_token_accuracy": 0.9345278829336167,
+ "num_tokens": 8509440.0,
+ "step": 1040
+ },
+ {
+ "epoch": 2.0629456601917875,
+ "grad_norm": 0.6698974967002869,
+ "learning_rate": 3.4788937409024746e-06,
+ "loss": 0.0712,
+ "mean_token_accuracy": 0.9358243614435195,
+ "num_tokens": 8591360.0,
+ "step": 1050
+ },
+ {
+ "epoch": 2.0826161790017212,
+ "grad_norm": 0.8802694082260132,
+ "learning_rate": 3.406113537117904e-06,
+ "loss": 0.0687,
+ "mean_token_accuracy": 0.9373654581606388,
+ "num_tokens": 8673280.0,
+ "step": 1060
+ },
+ {
+ "epoch": 2.102286697811655,
+ "grad_norm": 1.0914084911346436,
+ "learning_rate": 3.3333333333333333e-06,
+ "loss": 0.0727,
+ "mean_token_accuracy": 0.9355552814900875,
+ "num_tokens": 8755200.0,
+ "step": 1070
+ },
+ {
+ "epoch": 2.1219572166215883,
+ "grad_norm": 0.9575169086456299,
+ "learning_rate": 3.260553129548763e-06,
+ "loss": 0.0693,
+ "mean_token_accuracy": 0.9379770055413246,
+ "num_tokens": 8837120.0,
+ "step": 1080
+ },
+ {
+ "epoch": 2.141627735431522,
+ "grad_norm": 0.7972959876060486,
+ "learning_rate": 3.1877729257641924e-06,
+ "loss": 0.067,
+ "mean_token_accuracy": 0.9371819935739041,
+ "num_tokens": 8919040.0,
+ "step": 1090
+ },
+ {
+ "epoch": 2.161298254241456,
+ "grad_norm": 0.9708478450775146,
+ "learning_rate": 3.1149927219796215e-06,
+ "loss": 0.0728,
+ "mean_token_accuracy": 0.9354329705238342,
+ "num_tokens": 9000960.0,
+ "step": 1100
+ },
+ {
+ "epoch": 2.180968773051389,
+ "grad_norm": 1.0096272230148315,
+ "learning_rate": 3.042212518195051e-06,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9340875700116158,
+ "num_tokens": 9082880.0,
+ "step": 1110
+ },
+ {
+ "epoch": 2.200639291861323,
+ "grad_norm": 0.9769055247306824,
+ "learning_rate": 2.9694323144104806e-06,
+ "loss": 0.0729,
+ "mean_token_accuracy": 0.9370596796274185,
+ "num_tokens": 9164800.0,
+ "step": 1120
+ },
+ {
+ "epoch": 2.2203098106712567,
+ "grad_norm": 0.8182367086410522,
+ "learning_rate": 2.8966521106259098e-06,
+ "loss": 0.0714,
+ "mean_token_accuracy": 0.936338060349226,
+ "num_tokens": 9246720.0,
+ "step": 1130
+ },
+ {
+ "epoch": 2.23998032948119,
+ "grad_norm": 0.9285300374031067,
+ "learning_rate": 2.8238719068413393e-06,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9357020534574986,
+ "num_tokens": 9328640.0,
+ "step": 1140
+ },
+ {
+ "epoch": 2.2596508482911237,
+ "grad_norm": 0.9658852219581604,
+ "learning_rate": 2.7510917030567684e-06,
+ "loss": 0.0722,
+ "mean_token_accuracy": 0.9356164373457432,
+ "num_tokens": 9410560.0,
+ "step": 1150
+ },
+ {
+ "epoch": 2.2793213671010575,
+ "grad_norm": 0.8002274632453918,
+ "learning_rate": 2.678311499272198e-06,
+ "loss": 0.0688,
+ "mean_token_accuracy": 0.9368272982537746,
+ "num_tokens": 9492480.0,
+ "step": 1160
+ },
+ {
+ "epoch": 2.298991885910991,
+ "grad_norm": 0.7488352060317993,
+ "learning_rate": 2.6055312954876275e-06,
+ "loss": 0.0696,
+ "mean_token_accuracy": 0.9345523476600647,
+ "num_tokens": 9574400.0,
+ "step": 1170
+ },
+ {
+ "epoch": 2.3186624047209246,
+ "grad_norm": 0.7775608897209167,
+ "learning_rate": 2.5327510917030567e-06,
+ "loss": 0.0701,
+ "mean_token_accuracy": 0.9374877654016018,
+ "num_tokens": 9656320.0,
+ "step": 1180
+ },
+ {
+ "epoch": 2.3383329235308583,
+ "grad_norm": 0.720152735710144,
+ "learning_rate": 2.4599708879184862e-06,
+ "loss": 0.0686,
+ "mean_token_accuracy": 0.9362769082188607,
+ "num_tokens": 9738240.0,
+ "step": 1190
+ },
+ {
+ "epoch": 2.3580034423407916,
+ "grad_norm": 0.8474797010421753,
+ "learning_rate": 2.3871906841339158e-06,
+ "loss": 0.0692,
+ "mean_token_accuracy": 0.9355308227241039,
+ "num_tokens": 9820160.0,
+ "step": 1200
+ },
+ {
+ "epoch": 2.3776739611507254,
+ "grad_norm": 0.9920778870582581,
+ "learning_rate": 2.3144104803493453e-06,
+ "loss": 0.0753,
+ "mean_token_accuracy": 0.9328767113387585,
+ "num_tokens": 9902080.0,
+ "step": 1210
+ },
+ {
+ "epoch": 2.397344479960659,
+ "grad_norm": 0.8659960627555847,
+ "learning_rate": 2.2416302765647744e-06,
+ "loss": 0.0699,
+ "mean_token_accuracy": 0.9354452028870582,
+ "num_tokens": 9984000.0,
+ "step": 1220
+ },
+ {
+ "epoch": 2.4170149987705924,
+ "grad_norm": 1.2485941648483276,
+ "learning_rate": 2.168850072780204e-06,
+ "loss": 0.0716,
+ "mean_token_accuracy": 0.9364236779510975,
+ "num_tokens": 10065920.0,
+ "step": 1230
+ },
+ {
+ "epoch": 2.436685517580526,
+ "grad_norm": 1.018085241317749,
+ "learning_rate": 2.096069868995633e-06,
+ "loss": 0.0694,
+ "mean_token_accuracy": 0.9354696646332741,
+ "num_tokens": 10147840.0,
+ "step": 1240
+ },
+ {
+ "epoch": 2.45635603639046,
+ "grad_norm": 0.7919542789459229,
+ "learning_rate": 2.0232896652110627e-06,
+ "loss": 0.0702,
+ "mean_token_accuracy": 0.9352005854249,
+ "num_tokens": 10229760.0,
+ "step": 1250
+ },
+ {
+ "epoch": 2.4760265552003933,
+ "grad_norm": 0.7705115079879761,
+ "learning_rate": 1.9505094614264922e-06,
+ "loss": 0.0713,
+ "mean_token_accuracy": 0.93606898188591,
+ "num_tokens": 10311680.0,
+ "step": 1260
+ },
+ {
+ "epoch": 2.495697074010327,
+ "grad_norm": 1.0156041383743286,
+ "learning_rate": 1.8777292576419216e-06,
+ "loss": 0.0692,
+ "mean_token_accuracy": 0.9370963759720325,
+ "num_tokens": 10393600.0,
+ "step": 1270
+ },
+ {
+ "epoch": 2.515367592820261,
+ "grad_norm": 0.8900818228721619,
+ "learning_rate": 1.8049490538573511e-06,
+ "loss": 0.0719,
+ "mean_token_accuracy": 0.9380381591618061,
+ "num_tokens": 10475520.0,
+ "step": 1280
+ },
+ {
+ "epoch": 2.535038111630194,
+ "grad_norm": 0.8240485191345215,
+ "learning_rate": 1.7321688500727804e-06,
+ "loss": 0.0691,
+ "mean_token_accuracy": 0.9369985312223434,
+ "num_tokens": 10557440.0,
+ "step": 1290
+ },
+ {
+ "epoch": 2.554708630440128,
+ "grad_norm": 1.0211061239242554,
+ "learning_rate": 1.6593886462882098e-06,
+ "loss": 0.0713,
+ "mean_token_accuracy": 0.9360078267753125,
+ "num_tokens": 10639360.0,
+ "step": 1300
+ },
+ {
+ "epoch": 2.5743791492500616,
+ "grad_norm": 0.8435456156730652,
+ "learning_rate": 1.5866084425036391e-06,
+ "loss": 0.0685,
+ "mean_token_accuracy": 0.9358488261699677,
+ "num_tokens": 10721280.0,
+ "step": 1310
+ },
+ {
+ "epoch": 2.594049668059995,
+ "grad_norm": 0.7593806982040405,
+ "learning_rate": 1.5138282387190687e-06,
+ "loss": 0.0692,
+ "mean_token_accuracy": 0.9353962808847427,
+ "num_tokens": 10803200.0,
+ "step": 1320
+ },
+ {
+ "epoch": 2.6137201868699287,
+ "grad_norm": 0.8326400518417358,
+ "learning_rate": 1.441048034934498e-06,
+ "loss": 0.0722,
+ "mean_token_accuracy": 0.9346868857741356,
+ "num_tokens": 10885120.0,
+ "step": 1330
+ },
+ {
+ "epoch": 2.6333907056798624,
+ "grad_norm": 0.8349006772041321,
+ "learning_rate": 1.3682678311499273e-06,
+ "loss": 0.0684,
+ "mean_token_accuracy": 0.9371330693364144,
+ "num_tokens": 10967040.0,
+ "step": 1340
+ },
+ {
+ "epoch": 2.6530612244897958,
+ "grad_norm": 0.8567010760307312,
+ "learning_rate": 1.2954876273653567e-06,
+ "loss": 0.0707,
+ "mean_token_accuracy": 0.9372064515948295,
+ "num_tokens": 11048960.0,
+ "step": 1350
+ },
+ {
+ "epoch": 2.6727317432997295,
+ "grad_norm": 1.053802728652954,
+ "learning_rate": 1.222707423580786e-06,
+ "loss": 0.0717,
+ "mean_token_accuracy": 0.9355430543422699,
+ "num_tokens": 11130880.0,
+ "step": 1360
+ },
+ {
+ "epoch": 2.6924022621096633,
+ "grad_norm": 1.380603313446045,
+ "learning_rate": 1.1499272197962156e-06,
+ "loss": 0.0673,
+ "mean_token_accuracy": 0.9371942266821861,
+ "num_tokens": 11212800.0,
+ "step": 1370
+ },
+ {
+ "epoch": 2.7120727809195966,
+ "grad_norm": 1.00258207321167,
+ "learning_rate": 1.077147016011645e-06,
+ "loss": 0.0698,
+ "mean_token_accuracy": 0.9354329757392407,
+ "num_tokens": 11294720.0,
+ "step": 1380
+ },
+ {
+ "epoch": 2.7317432997295303,
+ "grad_norm": 0.6895238757133484,
+ "learning_rate": 1.0043668122270742e-06,
+ "loss": 0.0696,
+ "mean_token_accuracy": 0.9364481404423713,
+ "num_tokens": 11376640.0,
+ "step": 1390
+ },
+ {
+ "epoch": 2.751413818539464,
+ "grad_norm": 0.7581794857978821,
+ "learning_rate": 9.315866084425038e-07,
+ "loss": 0.0723,
+ "mean_token_accuracy": 0.9343321897089482,
+ "num_tokens": 11458560.0,
+ "step": 1400
+ },
+ {
+ "epoch": 2.7710843373493974,
+ "grad_norm": 0.8926005959510803,
+ "learning_rate": 8.58806404657933e-07,
+ "loss": 0.0712,
+ "mean_token_accuracy": 0.9362157486379147,
+ "num_tokens": 11540480.0,
+ "step": 1410
+ },
+ {
+ "epoch": 2.790754856159331,
+ "grad_norm": 1.136520504951477,
+ "learning_rate": 7.860262008733626e-07,
+ "loss": 0.07,
+ "mean_token_accuracy": 0.936668298393488,
+ "num_tokens": 11622400.0,
+ "step": 1420
+ },
+ {
+ "epoch": 2.810425374969265,
+ "grad_norm": 0.910785436630249,
+ "learning_rate": 7.132459970887918e-07,
+ "loss": 0.0669,
+ "mean_token_accuracy": 0.9368028342723846,
+ "num_tokens": 11704320.0,
+ "step": 1430
+ },
+ {
+ "epoch": 2.8300958937791982,
+ "grad_norm": 1.0571502447128296,
+ "learning_rate": 6.404657933042214e-07,
+ "loss": 0.072,
+ "mean_token_accuracy": 0.9329623252153396,
+ "num_tokens": 11786240.0,
+ "step": 1440
+ },
+ {
+ "epoch": 2.849766412589132,
+ "grad_norm": 0.9572676420211792,
+ "learning_rate": 5.676855895196507e-07,
+ "loss": 0.0723,
+ "mean_token_accuracy": 0.9353106647729874,
+ "num_tokens": 11868160.0,
+ "step": 1450
+ },
+ {
+ "epoch": 2.8694369313990657,
+ "grad_norm": 1.240408182144165,
+ "learning_rate": 4.949053857350801e-07,
+ "loss": 0.0712,
+ "mean_token_accuracy": 0.9353962816298008,
+ "num_tokens": 11950080.0,
+ "step": 1460
+ },
+ {
+ "epoch": 2.889107450208999,
+ "grad_norm": 1.247115135192871,
+ "learning_rate": 4.221251819505095e-07,
+ "loss": 0.0706,
+ "mean_token_accuracy": 0.9364236749708652,
+ "num_tokens": 12032000.0,
+ "step": 1470
+ },
+ {
+ "epoch": 2.908777969018933,
+ "grad_norm": 0.9491878747940063,
+ "learning_rate": 3.4934497816593887e-07,
+ "loss": 0.0686,
+ "mean_token_accuracy": 0.9364603698253632,
+ "num_tokens": 12113920.0,
+ "step": 1480
+ },
+ {
+ "epoch": 2.9284484878288666,
+ "grad_norm": 0.7979325652122498,
+ "learning_rate": 2.765647743813683e-07,
+ "loss": 0.0695,
+ "mean_token_accuracy": 0.9378180019557476,
+ "num_tokens": 12195840.0,
+ "step": 1490
+ },
+ {
+ "epoch": 2.9481190066388,
+ "grad_norm": 0.8599417209625244,
+ "learning_rate": 2.0378457059679768e-07,
+ "loss": 0.0727,
+ "mean_token_accuracy": 0.9342954941093922,
+ "num_tokens": 12277760.0,
+ "step": 1500
  }
  ],
  "logging_steps": 10,
@@ -927,7 +1377,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.163632523706368e+16,
+ "total_flos": 3.244772227547136e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null