{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 16.0,
  "eval_steps": 500,
  "global_step": 1004,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9880478087649402,
      "grad_norm": 0.2889673113822937,
      "learning_rate": 0.0002974798061389337,
      "loss": 0.7635,
      "step": 62
    },
    {
      "epoch": 1.9920318725099602,
      "grad_norm": 0.34704622626304626,
      "learning_rate": 0.0002944264943457189,
      "loss": 0.5643,
      "step": 125
    },
    {
      "epoch": 2.99601593625498,
      "grad_norm": 0.32337236404418945,
      "learning_rate": 0.00029137318255250403,
      "loss": 0.47,
      "step": 188
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.5398977994918823,
      "learning_rate": 0.00028831987075928915,
      "loss": 0.3844,
      "step": 251
    },
    {
      "epoch": 4.98804780876494,
      "grad_norm": 0.4496667981147766,
      "learning_rate": 0.0002853150242326333,
      "loss": 0.3176,
      "step": 313
    },
    {
      "epoch": 5.99203187250996,
      "grad_norm": 0.4373694062232971,
      "learning_rate": 0.0002822617124394184,
      "loss": 0.2479,
      "step": 376
    },
    {
      "epoch": 6.99601593625498,
      "grad_norm": 0.5146955251693726,
      "learning_rate": 0.0002792084006462035,
      "loss": 0.1915,
      "step": 439
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.5761932134628296,
      "learning_rate": 0.00027615508885298865,
      "loss": 0.1465,
      "step": 502
    },
    {
      "epoch": 8.98804780876494,
      "grad_norm": 0.46838995814323425,
      "learning_rate": 0.00027315024232633277,
      "loss": 0.1184,
      "step": 564
    },
    {
      "epoch": 9.99203187250996,
      "grad_norm": 0.39898747205734253,
      "learning_rate": 0.0002700969305331179,
      "loss": 0.0931,
      "step": 627
    },
    {
      "epoch": 10.996015936254981,
      "grad_norm": 0.4515402615070343,
      "learning_rate": 0.000267043618739903,
      "loss": 0.0776,
      "step": 690
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.2392401546239853,
      "learning_rate": 0.0002639903069466882,
      "loss": 0.0685,
      "step": 753
    },
    {
      "epoch": 12.98804780876494,
      "grad_norm": 0.3774527609348297,
      "learning_rate": 0.00026098546042003227,
      "loss": 0.0622,
      "step": 815
    },
    {
      "epoch": 13.99203187250996,
      "grad_norm": 0.22206874191761017,
      "learning_rate": 0.0002579321486268174,
      "loss": 0.0534,
      "step": 878
    },
    {
      "epoch": 14.996015936254981,
      "grad_norm": 0.25606569647789,
      "learning_rate": 0.00025487883683360257,
      "loss": 0.0504,
      "step": 941
    },
    {
      "epoch": 16.0,
      "grad_norm": 0.3752060830593109,
      "learning_rate": 0.0002518255250403877,
      "loss": 0.0469,
      "step": 1004
    }
  ],
  "logging_steps": 1,
  "max_steps": 6200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.211538100349665e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}