{
  "best_metric": 0.9843817398562538,
  "best_model_checkpoint": "./exps/1_fold_RobertaRBERT/checkpoint-22000",
  "epoch": 1.1827271994194017,
  "global_step": 22000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 1.0693175711982806e-05,
      "loss": 0.3694,
      "step": 1000
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.9924167397640137e-05,
      "loss": 0.1227,
      "step": 2000
    },
    {
      "epoch": 0.11,
      "eval_accuracy": 0.9525065963060686,
      "eval_loss": 0.12871456146240234,
      "eval_runtime": 1262.9688,
      "eval_samples_per_second": 141.341,
      "eval_steps_per_second": 4.417,
      "step": 2000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.9358252454656067e-05,
      "loss": 0.077,
      "step": 3000
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.8792337511671998e-05,
      "loss": 0.0597,
      "step": 4000
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.977625777971979,
      "eval_loss": 0.07220922410488129,
      "eval_runtime": 1261.8214,
      "eval_samples_per_second": 141.469,
      "eval_steps_per_second": 4.421,
      "step": 4000
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.8226422568687928e-05,
      "loss": 0.0492,
      "step": 5000
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.766050762570386e-05,
      "loss": 0.0429,
      "step": 6000
    },
    {
      "epoch": 0.32,
      "eval_accuracy": 0.9803035141085323,
      "eval_loss": 0.07511359453201294,
      "eval_runtime": 1262.1321,
      "eval_samples_per_second": 141.434,
      "eval_steps_per_second": 4.42,
      "step": 6000
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.709459268271979e-05,
      "loss": 0.0385,
      "step": 7000
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.652867773973572e-05,
      "loss": 0.0356,
      "step": 8000
    },
    {
      "epoch": 0.43,
      "eval_accuracy": 0.9810037589141164,
      "eval_loss": 0.07426761090755463,
      "eval_runtime": 1264.8606,
      "eval_samples_per_second": 141.129,
      "eval_steps_per_second": 4.411,
      "step": 8000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.5963328711694633e-05,
      "loss": 0.0326,
      "step": 9000
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.5397413768710567e-05,
      "loss": 0.0288,
      "step": 10000
    },
    {
      "epoch": 0.54,
      "eval_accuracy": 0.9811886235427906,
      "eval_loss": 0.0728357657790184,
      "eval_runtime": 1260.8301,
      "eval_samples_per_second": 141.581,
      "eval_steps_per_second": 4.425,
      "step": 10000
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.4831498825726494e-05,
      "loss": 0.0281,
      "step": 11000
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.4265583882742424e-05,
      "loss": 0.0266,
      "step": 12000
    },
    {
      "epoch": 0.65,
      "eval_accuracy": 0.9783652364866757,
      "eval_loss": 0.08940344303846359,
      "eval_runtime": 1265.0154,
      "eval_samples_per_second": 141.112,
      "eval_steps_per_second": 4.41,
      "step": 12000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.370023485470134e-05,
      "loss": 0.0244,
      "step": 13000
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.313431991171727e-05,
      "loss": 0.0225,
      "step": 14000
    },
    {
      "epoch": 0.75,
      "eval_accuracy": 0.9823538308992824,
      "eval_loss": 0.06773301213979721,
      "eval_runtime": 1260.1554,
      "eval_samples_per_second": 141.656,
      "eval_steps_per_second": 4.427,
      "step": 14000
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.2568970883676185e-05,
      "loss": 0.0206,
      "step": 15000
    },
    {
      "epoch": 0.86,
      "learning_rate": 1.2003055940692115e-05,
      "loss": 0.0208,
      "step": 16000
    },
    {
      "epoch": 0.86,
      "eval_accuracy": 0.9828524052008583,
      "eval_loss": 0.08000749349594116,
      "eval_runtime": 1262.4987,
      "eval_samples_per_second": 141.393,
      "eval_steps_per_second": 4.419,
      "step": 16000
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.1437140997708046e-05,
      "loss": 0.0199,
      "step": 17000
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.087179196966696e-05,
      "loss": 0.0182,
      "step": 18000
    },
    {
      "epoch": 0.97,
      "eval_accuracy": 0.9813734881714647,
      "eval_loss": 0.08186674118041992,
      "eval_runtime": 1260.8606,
      "eval_samples_per_second": 141.577,
      "eval_steps_per_second": 4.425,
      "step": 18000
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.030587702668289e-05,
      "loss": 0.0159,
      "step": 19000
    },
    {
      "epoch": 1.08,
      "learning_rate": 9.739962083698821e-06,
      "loss": 0.0137,
      "step": 20000
    },
    {
      "epoch": 1.08,
      "eval_accuracy": 0.9839727968897927,
      "eval_loss": 0.062463145703077316,
      "eval_runtime": 1261.7007,
      "eval_samples_per_second": 141.483,
      "eval_steps_per_second": 4.422,
      "step": 20000
    },
    {
      "epoch": 1.13,
      "learning_rate": 9.174047140714752e-06,
      "loss": 0.0123,
      "step": 21000
    },
    {
      "epoch": 1.18,
      "learning_rate": 8.60926402761665e-06,
      "loss": 0.0138,
      "step": 22000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.9843817398562538,
      "eval_loss": 0.07732487469911575,
      "eval_runtime": 1262.988,
      "eval_samples_per_second": 141.339,
      "eval_steps_per_second": 4.417,
      "step": 22000
    }
  ],
  "max_steps": 37202,
  "num_train_epochs": 2,
  "total_flos": 2.7232075889551824e+17,
  "trial_name": null,
  "trial_params": null
}