{
  "best_metric": 0.937442502299908,
  "best_model_checkpoint": "data/train-test/roberta-large-output//model/checkpoint-232",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 232,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": null,
      "eval_f1": 0.902638762511374,
      "eval_loss": 0.05000825226306915,
      "eval_precision": 0.8928892889288929,
      "eval_recall": 0.9126034958601656,
      "eval_runtime": 4.4114,
      "eval_samples_per_second": 219.883,
      "eval_steps_per_second": 7.027,
      "step": 116
    },
    {
      "epoch": 2.0,
      "eval_accuracy": null,
      "eval_f1": 0.9276285844333181,
      "eval_loss": 0.03446832671761513,
      "eval_precision": 0.918018018018018,
      "eval_recall": 0.937442502299908,
      "eval_runtime": 4.4048,
      "eval_samples_per_second": 220.215,
      "eval_steps_per_second": 7.038,
      "step": 232
    }
  ],
  "logging_steps": 500,
  "max_steps": 232,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 853475550077862.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}