{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3429796355841372,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.00019931271477663232,
      "loss": 2.5587,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001986254295532646,
      "loss": 2.3914,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019793814432989693,
      "loss": 2.4218,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019725085910652924,
      "loss": 2.3414,
      "step": 4
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001965635738831615,
      "loss": 2.2469,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019587628865979381,
      "loss": 2.3241,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019518900343642613,
      "loss": 2.3266,
      "step": 7
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019450171821305842,
      "loss": 2.1856,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019381443298969073,
      "loss": 2.3247,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019312714776632305,
      "loss": 2.3245,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019243986254295533,
      "loss": 2.2591,
      "step": 11
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019175257731958765,
      "loss": 2.1767,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019106529209621996,
      "loss": 2.3478,
      "step": 13
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019037800687285222,
      "loss": 2.3339,
      "step": 14
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018969072164948454,
      "loss": 2.234,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00018900343642611685,
      "loss": 2.2651,
      "step": 16
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018831615120274914,
      "loss": 2.1831,
      "step": 17
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00018762886597938145,
      "loss": 2.216,
      "step": 18
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018694158075601377,
      "loss": 2.1359,
      "step": 19
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018625429553264605,
      "loss": 2.1215,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018556701030927837,
      "loss": 2.2179,
      "step": 21
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018487972508591068,
      "loss": 2.2598,
      "step": 22
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018419243986254294,
      "loss": 2.1813,
      "step": 23
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018350515463917526,
      "loss": 2.2006,
      "step": 24
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018281786941580757,
      "loss": 2.1564,
      "step": 25
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018213058419243986,
      "loss": 2.2537,
      "step": 26
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018144329896907217,
      "loss": 2.1975,
      "step": 27
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001807560137457045,
      "loss": 2.2566,
      "step": 28
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018006872852233677,
      "loss": 2.1464,
      "step": 29
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001793814432989691,
      "loss": 2.1421,
      "step": 30
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001786941580756014,
      "loss": 2.1276,
      "step": 31
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017800687285223366,
      "loss": 2.0649,
      "step": 32
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00017731958762886598,
      "loss": 2.1835,
      "step": 33
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001766323024054983,
      "loss": 2.1711,
      "step": 34
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00017594501718213058,
      "loss": 2.2591,
      "step": 35
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001752577319587629,
      "loss": 2.1471,
      "step": 36
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001745704467353952,
      "loss": 2.0861,
      "step": 37
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001738831615120275,
      "loss": 2.0702,
      "step": 38
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001731958762886598,
      "loss": 2.1096,
      "step": 39
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017250859106529212,
      "loss": 2.1062,
      "step": 40
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017182130584192438,
      "loss": 2.2545,
      "step": 41
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001711340206185567,
      "loss": 2.1572,
      "step": 42
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000170446735395189,
      "loss": 2.0749,
      "step": 43
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001697594501718213,
      "loss": 2.1922,
      "step": 44
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00016907216494845361,
      "loss": 2.1915,
      "step": 45
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016838487972508593,
      "loss": 2.1594,
      "step": 46
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016769759450171822,
      "loss": 2.176,
      "step": 47
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00016701030927835053,
      "loss": 2.1223,
      "step": 48
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016632302405498285,
      "loss": 2.1263,
      "step": 49
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016563573883161513,
      "loss": 2.0481,
      "step": 50
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016494845360824742,
      "loss": 2.1043,
      "step": 51
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016426116838487973,
      "loss": 2.1678,
      "step": 52
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016357388316151202,
      "loss": 2.1602,
      "step": 53
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016288659793814434,
      "loss": 2.1448,
      "step": 54
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016219931271477665,
      "loss": 2.1536,
      "step": 55
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016151202749140894,
      "loss": 2.0339,
      "step": 56
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016082474226804125,
      "loss": 2.023,
      "step": 57
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016013745704467357,
      "loss": 2.1407,
      "step": 58
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00015945017182130585,
      "loss": 2.1134,
      "step": 59
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015876288659793814,
      "loss": 2.1652,
      "step": 60
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015807560137457046,
      "loss": 2.0051,
      "step": 61
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00015738831615120274,
      "loss": 2.0604,
      "step": 62
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015670103092783506,
      "loss": 2.1708,
      "step": 63
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015601374570446737,
      "loss": 2.1106,
      "step": 64
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015532646048109966,
      "loss": 2.1445,
      "step": 65
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015463917525773197,
      "loss": 2.0879,
      "step": 66
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0001539518900343643,
      "loss": 2.1498,
      "step": 67
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015326460481099657,
      "loss": 2.0719,
      "step": 68
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015257731958762886,
      "loss": 2.2167,
      "step": 69
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015189003436426118,
      "loss": 2.0811,
      "step": 70
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00015120274914089346,
      "loss": 2.1058,
      "step": 71
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00015051546391752578,
      "loss": 2.0392,
      "step": 72
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001498281786941581,
      "loss": 2.0957,
      "step": 73
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00014914089347079038,
      "loss": 1.9994,
      "step": 74
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001484536082474227,
      "loss": 2.0464,
      "step": 75
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.000147766323024055,
      "loss": 2.0417,
      "step": 76
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001470790378006873,
      "loss": 2.105,
      "step": 77
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014639175257731958,
      "loss": 2.1147,
      "step": 78
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001457044673539519,
      "loss": 1.9964,
      "step": 79
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00014501718213058418,
      "loss": 1.9723,
      "step": 80
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001443298969072165,
      "loss": 2.0621,
      "step": 81
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00014364261168384881,
      "loss": 2.2703,
      "step": 82
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001429553264604811,
      "loss": 2.0815,
      "step": 83
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014226804123711342,
      "loss": 2.0774,
      "step": 84
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014158075601374573,
      "loss": 2.066,
      "step": 85
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00014089347079037802,
      "loss": 2.03,
      "step": 86
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001402061855670103,
      "loss": 2.1433,
      "step": 87
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00013951890034364262,
      "loss": 2.0811,
      "step": 88
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001388316151202749,
      "loss": 1.9791,
      "step": 89
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013814432989690722,
      "loss": 2.0876,
      "step": 90
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00013745704467353953,
      "loss": 2.0314,
      "step": 91
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013676975945017182,
      "loss": 1.9485,
      "step": 92
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013608247422680414,
      "loss": 2.078,
      "step": 93
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00013539518900343645,
      "loss": 2.1251,
      "step": 94
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013470790378006874,
      "loss": 1.9736,
      "step": 95
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013402061855670103,
      "loss": 2.0189,
      "step": 96
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00013333333333333334,
      "loss": 2.0061,
      "step": 97
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013264604810996563,
      "loss": 1.9595,
      "step": 98
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013195876288659794,
      "loss": 1.9702,
      "step": 99
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00013127147766323026,
      "loss": 2.0322,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 291,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 6.083398113601536e+16,
  "trial_name": null,
  "trial_params": null
}