{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.96,
  "eval_steps": 500,
  "global_step": 186,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08,
      "grad_norm": 0.44993048906326294,
      "learning_rate": 1.9977186146800707e-05,
      "loss": 1.1852,
      "mean_token_accuracy": 0.7368780478835106,
      "num_tokens": 16177.0,
      "step": 5
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.446072518825531,
      "learning_rate": 1.9884683243281117e-05,
      "loss": 1.1334,
      "mean_token_accuracy": 0.7490372791886329,
      "num_tokens": 32676.0,
      "step": 10
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.5208024978637695,
      "learning_rate": 1.9721724257579907e-05,
      "loss": 1.1397,
      "mean_token_accuracy": 0.7374951928853989,
      "num_tokens": 48484.0,
      "step": 15
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.5335091352462769,
      "learning_rate": 1.9489470729364694e-05,
      "loss": 1.1291,
      "mean_token_accuracy": 0.7498478129506111,
      "num_tokens": 64399.0,
      "step": 20
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.6421738266944885,
      "learning_rate": 1.918957811620231e-05,
      "loss": 1.094,
      "mean_token_accuracy": 0.7465715885162354,
      "num_tokens": 80038.0,
      "step": 25
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.5590124726295471,
      "learning_rate": 1.8824183993782193e-05,
      "loss": 1.0891,
      "mean_token_accuracy": 0.7500069707632064,
      "num_tokens": 95591.0,
      "step": 30
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.5460164546966553,
      "learning_rate": 1.839589281969639e-05,
      "loss": 1.0278,
      "mean_token_accuracy": 0.7575115114450455,
      "num_tokens": 111483.0,
      "step": 35
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.6239035129547119,
      "learning_rate": 1.7907757369376984e-05,
      "loss": 0.9952,
      "mean_token_accuracy": 0.7626694276928901,
      "num_tokens": 128244.0,
      "step": 40
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.6833351254463196,
      "learning_rate": 1.7363256976511972e-05,
      "loss": 1.0109,
      "mean_token_accuracy": 0.760352547466755,
      "num_tokens": 142495.0,
      "step": 45
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.6993600130081177,
      "learning_rate": 1.6766272733037575e-05,
      "loss": 0.9705,
      "mean_token_accuracy": 0.768488883972168,
      "num_tokens": 158074.0,
      "step": 50
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.7641519904136658,
      "learning_rate": 1.612105982547663e-05,
      "loss": 0.8792,
      "mean_token_accuracy": 0.7812322407960892,
      "num_tokens": 174411.0,
      "step": 55
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.8612833023071289,
      "learning_rate": 1.543221720480419e-05,
      "loss": 0.8912,
      "mean_token_accuracy": 0.787124752998352,
      "num_tokens": 189480.0,
      "step": 60
    },
    {
      "epoch": 1.032,
      "grad_norm": 0.9852380752563477,
      "learning_rate": 1.4704654806027558e-05,
      "loss": 0.8234,
      "mean_token_accuracy": 0.7974502179357741,
      "num_tokens": 203798.0,
      "step": 65
    },
    {
      "epoch": 1.112,
      "grad_norm": 0.8131120204925537,
      "learning_rate": 1.3943558551133186e-05,
      "loss": 0.8048,
      "mean_token_accuracy": 0.8055687263607979,
      "num_tokens": 219483.0,
      "step": 70
    },
    {
      "epoch": 1.192,
      "grad_norm": 1.0975677967071533,
      "learning_rate": 1.3154353384852559e-05,
      "loss": 0.7795,
      "mean_token_accuracy": 0.8085009381175041,
      "num_tokens": 234730.0,
      "step": 75
    },
    {
      "epoch": 1.272,
      "grad_norm": 0.8204861879348755,
      "learning_rate": 1.2342664606720823e-05,
      "loss": 0.7457,
      "mean_token_accuracy": 0.822769646346569,
      "num_tokens": 250096.0,
      "step": 80
    },
    {
      "epoch": 1.3519999999999999,
      "grad_norm": 0.7401573061943054,
      "learning_rate": 1.1514277775045768e-05,
      "loss": 0.7079,
      "mean_token_accuracy": 0.8261085689067841,
      "num_tokens": 266076.0,
      "step": 85
    },
    {
      "epoch": 1.432,
      "grad_norm": 0.6010871529579163,
      "learning_rate": 1.0675097468583653e-05,
      "loss": 0.7074,
      "mean_token_accuracy": 0.8272138416767121,
      "num_tokens": 281385.0,
      "step": 90
    },
    {
      "epoch": 1.512,
      "grad_norm": 0.5529438257217407,
      "learning_rate": 9.83110519986069e-06,
      "loss": 0.6664,
      "mean_token_accuracy": 0.8329674065113067,
      "num_tokens": 296647.0,
      "step": 95
    },
    {
      "epoch": 1.592,
      "grad_norm": 0.5067458152770996,
      "learning_rate": 8.98831678012568e-06,
      "loss": 0.6526,
      "mean_token_accuracy": 0.8348439291119576,
      "num_tokens": 312067.0,
      "step": 100
    },
    {
      "epoch": 1.6720000000000002,
      "grad_norm": 0.5340730547904968,
      "learning_rate": 8.15273943982811e-06,
      "loss": 0.675,
      "mean_token_accuracy": 0.8353347033262253,
      "num_tokens": 327458.0,
      "step": 105
    },
    {
      "epoch": 1.752,
      "grad_norm": 0.4149323105812073,
      "learning_rate": 7.330329010258483e-06,
      "loss": 0.6392,
      "mean_token_accuracy": 0.8354402139782906,
      "num_tokens": 344098.0,
      "step": 110
    },
    {
      "epoch": 1.8319999999999999,
      "grad_norm": 0.42973241209983826,
      "learning_rate": 6.526947471551799e-06,
      "loss": 0.6585,
      "mean_token_accuracy": 0.8377262473106384,
      "num_tokens": 359964.0,
      "step": 115
    },
    {
      "epoch": 1.912,
      "grad_norm": 0.38989219069480896,
      "learning_rate": 5.748321169643596e-06,
      "loss": 0.6001,
      "mean_token_accuracy": 0.8486244857311249,
      "num_tokens": 376782.0,
      "step": 120
    },
    {
      "epoch": 1.992,
      "grad_norm": 0.3842228651046753,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.6408,
      "mean_token_accuracy": 0.8384670644998551,
      "num_tokens": 393357.0,
      "step": 125
    },
    {
      "epoch": 2.064,
      "grad_norm": 0.386435866355896,
      "learning_rate": 4.287317849052075e-06,
      "loss": 0.6318,
      "mean_token_accuracy": 0.8400428146123886,
      "num_tokens": 408486.0,
      "step": 130
    },
    {
      "epoch": 2.144,
      "grad_norm": 0.4155893325805664,
      "learning_rate": 3.6153545753001663e-06,
      "loss": 0.6365,
      "mean_token_accuracy": 0.8424267500638962,
      "num_tokens": 423462.0,
      "step": 135
    },
    {
      "epoch": 2.224,
      "grad_norm": 0.42164576053619385,
      "learning_rate": 2.9888998010794745e-06,
      "loss": 0.65,
      "mean_token_accuracy": 0.8395944103598595,
      "num_tokens": 439308.0,
      "step": 140
    },
    {
      "epoch": 2.304,
      "grad_norm": 0.40206339955329895,
      "learning_rate": 2.4124187730720916e-06,
      "loss": 0.6441,
      "mean_token_accuracy": 0.8418430313467979,
      "num_tokens": 454859.0,
      "step": 145
    },
    {
      "epoch": 2.384,
      "grad_norm": 0.41322216391563416,
      "learning_rate": 1.8900205349049904e-06,
      "loss": 0.6169,
      "mean_token_accuracy": 0.8490483820438385,
      "num_tokens": 470548.0,
      "step": 150
    },
    {
      "epoch": 2.464,
      "grad_norm": 0.4834882915019989,
      "learning_rate": 1.425428638693489e-06,
      "loss": 0.6545,
      "mean_token_accuracy": 0.8358306258916854,
      "num_tokens": 485169.0,
      "step": 155
    },
    {
      "epoch": 2.544,
      "grad_norm": 0.41212084889411926,
      "learning_rate": 1.0219546042925842e-06,
      "loss": 0.6087,
      "mean_token_accuracy": 0.8500525638461113,
      "num_tokens": 501284.0,
      "step": 160
    },
    {
      "epoch": 2.624,
      "grad_norm": 0.4313817620277405,
      "learning_rate": 6.824743154333157e-07,
      "loss": 0.6387,
      "mean_token_accuracy": 0.8408319368958473,
      "num_tokens": 517799.0,
      "step": 165
    },
    {
      "epoch": 2.7039999999999997,
      "grad_norm": 0.38043487071990967,
      "learning_rate": 4.094075209879789e-07,
      "loss": 0.5942,
      "mean_token_accuracy": 0.8503163874149322,
      "num_tokens": 534446.0,
      "step": 170
    },
    {
      "epoch": 2.784,
      "grad_norm": 0.38836878538131714,
      "learning_rate": 2.0470058747505516e-07,
      "loss": 0.6464,
      "mean_token_accuracy": 0.8419791996479035,
      "num_tokens": 549141.0,
      "step": 175
    },
    {
      "epoch": 2.864,
      "grad_norm": 0.4290372133255005,
      "learning_rate": 6.981262574066395e-08,
      "loss": 0.657,
      "mean_token_accuracy": 0.8399893954396248,
      "num_tokens": 564279.0,
      "step": 180
    },
    {
      "epoch": 2.944,
      "grad_norm": 0.3860047161579132,
      "learning_rate": 5.705090702819993e-09,
      "loss": 0.6336,
      "mean_token_accuracy": 0.8413404822349548,
      "num_tokens": 580854.0,
      "step": 185
    }
  ],
  "logging_steps": 5,
  "max_steps": 186,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4332405321474048.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}