{
  "best_metric": 0.05840596929192543,
  "best_model_checkpoint": "/content/drive/MyDrive/dataset_for_research/ct_rate/data/ct_rate_jpn/model_output/alabnii_jmedroberta-base-manbyo-wordpiece/checkpoint-9112",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 9112,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21949078138718173,
      "grad_norm": 0.8158975839614868,
      "learning_rate": 1.8902546093064093e-05,
      "loss": 0.378,
      "step": 500
    },
    {
      "epoch": 0.43898156277436345,
      "grad_norm": 0.7372651100158691,
      "learning_rate": 1.7805092186128183e-05,
      "loss": 0.2102,
      "step": 1000
    },
    {
      "epoch": 0.6584723441615452,
      "grad_norm": 0.4202118515968323,
      "learning_rate": 1.6707638279192274e-05,
      "loss": 0.1547,
      "step": 1500
    },
    {
      "epoch": 0.8779631255487269,
      "grad_norm": 1.2505989074707031,
      "learning_rate": 1.561018437225637e-05,
      "loss": 0.1261,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9747219783435762,
      "eval_f1": 0.936358333589169,
      "eval_loss": 0.0987885594367981,
      "eval_precision": 0.9453846630711054,
      "eval_recall": 0.927502736893322,
      "eval_runtime": 53.9968,
      "eval_samples_per_second": 84.375,
      "eval_steps_per_second": 10.556,
      "step": 2278
    },
    {
      "epoch": 1.0974539069359086,
      "grad_norm": 0.5161768794059753,
      "learning_rate": 1.4512730465320458e-05,
      "loss": 0.1034,
      "step": 2500
    },
    {
      "epoch": 1.3169446883230904,
      "grad_norm": 0.7225932478904724,
      "learning_rate": 1.3415276558384549e-05,
      "loss": 0.0894,
      "step": 3000
    },
    {
      "epoch": 1.536435469710272,
      "grad_norm": 0.3510231375694275,
      "learning_rate": 1.2317822651448641e-05,
      "loss": 0.0771,
      "step": 3500
    },
    {
      "epoch": 1.755926251097454,
      "grad_norm": 0.47382861375808716,
      "learning_rate": 1.122036874451273e-05,
      "loss": 0.0732,
      "step": 4000
    },
    {
      "epoch": 1.9754170324846356,
      "grad_norm": 0.7921191453933716,
      "learning_rate": 1.0122914837576823e-05,
      "loss": 0.0691,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9828675251195005,
      "eval_f1": 0.9571737738897187,
      "eval_loss": 0.06881628185510635,
      "eval_precision": 0.9594256034219371,
      "eval_recall": 0.9549324899647245,
      "eval_runtime": 54.0752,
      "eval_samples_per_second": 84.253,
      "eval_steps_per_second": 10.541,
      "step": 4556
    },
    {
      "epoch": 2.194907813871817,
      "grad_norm": 1.1226553916931152,
      "learning_rate": 9.025460930640914e-06,
      "loss": 0.0587,
      "step": 5000
    },
    {
      "epoch": 2.4143985952589992,
      "grad_norm": 0.44973134994506836,
      "learning_rate": 7.928007023705005e-06,
      "loss": 0.0568,
      "step": 5500
    },
    {
      "epoch": 2.633889376646181,
      "grad_norm": 0.9191294312477112,
      "learning_rate": 6.830553116769097e-06,
      "loss": 0.0523,
      "step": 6000
    },
    {
      "epoch": 2.853380158033363,
      "grad_norm": 0.8285248875617981,
      "learning_rate": 5.7330992098331876e-06,
      "loss": 0.0526,
      "step": 6500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9822822163691347,
      "eval_f1": 0.9555535162582974,
      "eval_loss": 0.06393539160490036,
      "eval_precision": 0.9612283832851253,
      "eval_recall": 0.9499452621335603,
      "eval_runtime": 54.0536,
      "eval_samples_per_second": 84.287,
      "eval_steps_per_second": 10.545,
      "step": 6834
    },
    {
      "epoch": 3.0728709394205445,
      "grad_norm": 0.521659255027771,
      "learning_rate": 4.6356453028972785e-06,
      "loss": 0.0509,
      "step": 7000
    },
    {
      "epoch": 3.292361720807726,
      "grad_norm": 0.4619588553905487,
      "learning_rate": 3.53819139596137e-06,
      "loss": 0.0435,
      "step": 7500
    },
    {
      "epoch": 3.511852502194908,
      "grad_norm": 1.1297919750213623,
      "learning_rate": 2.440737489025461e-06,
      "loss": 0.0448,
      "step": 8000
    },
    {
      "epoch": 3.7313432835820897,
      "grad_norm": 0.7216348648071289,
      "learning_rate": 1.3432835820895524e-06,
      "loss": 0.044,
      "step": 8500
    },
    {
      "epoch": 3.9508340649692713,
      "grad_norm": 1.2042436599731445,
      "learning_rate": 2.458296751536436e-07,
      "loss": 0.0426,
      "step": 9000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9840991122817286,
      "eval_f1": 0.9601125657653249,
      "eval_loss": 0.05840596929192543,
      "eval_precision": 0.9657846153846154,
      "eval_recall": 0.9545067510035276,
      "eval_runtime": 54.1839,
      "eval_samples_per_second": 84.084,
      "eval_steps_per_second": 10.52,
      "step": 9112
    }
  ],
  "logging_steps": 500,
  "max_steps": 9112,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6137019563915456e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}