{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 416,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09615384615384616,
      "grad_norm": 1.2597733736038208,
      "learning_rate": 2.380952380952381e-06,
      "loss": 0.3483,
      "step": 10
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 0.9227814078330994,
      "learning_rate": 4.761904761904762e-06,
      "loss": 0.3115,
      "step": 20
    },
    {
      "epoch": 0.28846153846153844,
      "grad_norm": 0.5784149169921875,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.2758,
      "step": 30
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.5355926156044006,
      "learning_rate": 9.523809523809525e-06,
      "loss": 0.2754,
      "step": 40
    },
    {
      "epoch": 0.4807692307692308,
      "grad_norm": 0.5469948053359985,
      "learning_rate": 9.997469951095982e-06,
      "loss": 0.258,
      "step": 50
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 0.5373064875602722,
      "learning_rate": 9.987196015552742e-06,
      "loss": 0.2596,
      "step": 60
    },
    {
      "epoch": 0.6730769230769231,
      "grad_norm": 0.576485812664032,
      "learning_rate": 9.969036298013732e-06,
      "loss": 0.2583,
      "step": 70
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.5451536178588867,
      "learning_rate": 9.943019512725026e-06,
      "loss": 0.2672,
      "step": 80
    },
    {
      "epoch": 0.8653846153846154,
      "grad_norm": 0.6965824961662292,
      "learning_rate": 9.9091867975718e-06,
      "loss": 0.2573,
      "step": 90
    },
    {
      "epoch": 0.9615384615384616,
      "grad_norm": 0.6300221681594849,
      "learning_rate": 9.867591649030863e-06,
      "loss": 0.2636,
      "step": 100
    },
    {
      "epoch": 1.0576923076923077,
      "grad_norm": 0.4234144687652588,
      "learning_rate": 9.818299837581758e-06,
      "loss": 0.2199,
      "step": 110
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 0.4467966854572296,
      "learning_rate": 9.761389303710085e-06,
      "loss": 0.2019,
      "step": 120
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.44141167402267456,
      "learning_rate": 9.696950034667595e-06,
      "loss": 0.1926,
      "step": 130
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": 0.5096948742866516,
      "learning_rate": 9.62508392218384e-06,
      "loss": 0.1994,
      "step": 140
    },
    {
      "epoch": 1.4423076923076923,
      "grad_norm": 0.4182388186454773,
      "learning_rate": 9.545904601354402e-06,
      "loss": 0.1918,
      "step": 150
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.5185367465019226,
      "learning_rate": 9.459537270960464e-06,
      "loss": 0.1948,
      "step": 160
    },
    {
      "epoch": 1.6346153846153846,
      "grad_norm": 0.5209194421768188,
      "learning_rate": 9.36611849550378e-06,
      "loss": 0.2015,
      "step": 170
    },
    {
      "epoch": 1.7307692307692308,
      "grad_norm": 0.5025277137756348,
      "learning_rate": 9.265795989270148e-06,
      "loss": 0.2022,
      "step": 180
    },
    {
      "epoch": 1.8269230769230769,
      "grad_norm": 0.4965076148509979,
      "learning_rate": 9.158728382762753e-06,
      "loss": 0.2023,
      "step": 190
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 0.5124719142913818,
      "learning_rate": 9.045084971874738e-06,
      "loss": 0.1962,
      "step": 200
    },
    {
      "epoch": 2.019230769230769,
      "grad_norm": 0.4854343831539154,
      "learning_rate": 8.925045450197593e-06,
      "loss": 0.1821,
      "step": 210
    },
    {
      "epoch": 2.1153846153846154,
      "grad_norm": 0.3944475054740906,
      "learning_rate": 8.798799624888665e-06,
      "loss": 0.1239,
      "step": 220
    },
    {
      "epoch": 2.2115384615384617,
      "grad_norm": 0.5081722736358643,
      "learning_rate": 8.666547116547015e-06,
      "loss": 0.1273,
      "step": 230
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.42684561014175415,
      "learning_rate": 8.528497043572222e-06,
      "loss": 0.1211,
      "step": 240
    },
    {
      "epoch": 2.4038461538461537,
      "grad_norm": 0.40685558319091797,
      "learning_rate": 8.38486769150522e-06,
      "loss": 0.1275,
      "step": 250
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.5295878648757935,
      "learning_rate": 8.235886167873975e-06,
      "loss": 0.1333,
      "step": 260
    },
    {
      "epoch": 2.5961538461538463,
      "grad_norm": 0.4011670649051666,
      "learning_rate": 8.081788043089818e-06,
      "loss": 0.124,
      "step": 270
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.4178542494773865,
      "learning_rate": 7.922816977962191e-06,
      "loss": 0.1271,
      "step": 280
    },
    {
      "epoch": 2.7884615384615383,
      "grad_norm": 0.39880824089050293,
      "learning_rate": 7.759224338420832e-06,
      "loss": 0.1277,
      "step": 290
    },
    {
      "epoch": 2.8846153846153846,
      "grad_norm": 0.4713616669178009,
      "learning_rate": 7.591268798054569e-06,
      "loss": 0.13,
      "step": 300
    },
    {
      "epoch": 2.980769230769231,
      "grad_norm": 0.4073733389377594,
      "learning_rate": 7.419215929095211e-06,
      "loss": 0.1268,
      "step": 310
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.5005382299423218,
      "learning_rate": 7.243337782493255e-06,
      "loss": 0.0846,
      "step": 320
    },
    {
      "epoch": 3.173076923076923,
      "grad_norm": 0.41398388147354126,
      "learning_rate": 7.063912457749426e-06,
      "loss": 0.0733,
      "step": 330
    },
    {
      "epoch": 3.269230769230769,
      "grad_norm": 0.46325525641441345,
      "learning_rate": 6.8812236631821886e-06,
      "loss": 0.0795,
      "step": 340
    },
    {
      "epoch": 3.3653846153846154,
      "grad_norm": 0.4737342596054077,
      "learning_rate": 6.695560267326599e-06,
      "loss": 0.0782,
      "step": 350
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 0.4067882299423218,
      "learning_rate": 6.507215842173758e-06,
      "loss": 0.0796,
      "step": 360
    },
    {
      "epoch": 3.5576923076923075,
      "grad_norm": 0.42072340846061707,
      "learning_rate": 6.316488198973162e-06,
      "loss": 0.0774,
      "step": 370
    },
    {
      "epoch": 3.6538461538461537,
      "grad_norm": 0.39316093921661377,
      "learning_rate": 6.123678917331902e-06,
      "loss": 0.0774,
      "step": 380
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.4232267439365387,
      "learning_rate": 5.9290928683553105e-06,
      "loss": 0.0789,
      "step": 390
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.45357951521873474,
      "learning_rate": 5.733037732583091e-06,
      "loss": 0.0808,
      "step": 400
    },
    {
      "epoch": 3.9423076923076925,
      "grad_norm": 0.5127453207969666,
      "learning_rate": 5.535823513483123e-06,
      "loss": 0.0806,
      "step": 410
    }
  ],
  "logging_steps": 10,
  "max_steps": 832,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 42586106445824.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}