{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5673758865248227,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 0.00019981060606060605,
      "loss": 2.9206,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019962121212121212,
      "loss": 2.7609,
      "step": 2
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001994318181818182,
      "loss": 2.6878,
      "step": 3
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00019924242424242426,
      "loss": 2.6697,
      "step": 4
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.0001990530303030303,
      "loss": 2.5818,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019886363636363637,
      "loss": 2.5396,
      "step": 6
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00019867424242424244,
      "loss": 2.5265,
      "step": 7
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0001984848484848485,
      "loss": 2.5475,
      "step": 8
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019829545454545455,
      "loss": 2.4835,
      "step": 9
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001981060606060606,
      "loss": 2.4559,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001979166666666667,
      "loss": 2.4511,
      "step": 11
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.00019772727272727273,
      "loss": 2.4592,
      "step": 12
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0001975378787878788,
      "loss": 2.4495,
      "step": 13
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019734848484848484,
      "loss": 2.4714,
      "step": 14
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019715909090909094,
      "loss": 2.4302,
      "step": 15
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019696969696969698,
      "loss": 2.4097,
      "step": 16
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019678030303030305,
      "loss": 2.4523,
      "step": 17
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001965909090909091,
      "loss": 2.4325,
      "step": 18
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019640151515151516,
      "loss": 2.4125,
      "step": 19
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019621212121212123,
      "loss": 2.4329,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019602272727272727,
      "loss": 2.3471,
      "step": 21
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019583333333333334,
      "loss": 2.3012,
      "step": 22
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001956439393939394,
      "loss": 2.3869,
      "step": 23
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019545454545454548,
      "loss": 2.3822,
      "step": 24
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019526515151515152,
      "loss": 2.3427,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0001950757575757576,
      "loss": 2.3659,
      "step": 26
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019488636363636366,
      "loss": 2.3826,
      "step": 27
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0001946969696969697,
      "loss": 2.3532,
      "step": 28
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019450757575757577,
      "loss": 2.3828,
      "step": 29
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001943181818181818,
      "loss": 2.3133,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0001941287878787879,
      "loss": 2.3613,
      "step": 31
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019393939393939395,
      "loss": 2.3867,
      "step": 32
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019375000000000002,
      "loss": 2.2966,
      "step": 33
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019356060606060606,
      "loss": 2.3436,
      "step": 34
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019337121212121213,
      "loss": 2.3425,
      "step": 35
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001931818181818182,
      "loss": 2.307,
      "step": 36
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019299242424242424,
      "loss": 2.3521,
      "step": 37
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0001928030303030303,
      "loss": 2.3302,
      "step": 38
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019261363636363635,
      "loss": 2.312,
      "step": 39
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019242424242424245,
      "loss": 2.3655,
      "step": 40
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001922348484848485,
      "loss": 2.344,
      "step": 41
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019204545454545456,
      "loss": 2.3373,
      "step": 42
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001918560606060606,
      "loss": 2.3331,
      "step": 43
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019166666666666667,
      "loss": 2.3376,
      "step": 44
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019147727272727274,
      "loss": 2.3369,
      "step": 45
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019128787878787878,
      "loss": 2.3413,
      "step": 46
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019109848484848485,
      "loss": 2.3212,
      "step": 47
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019090909090909092,
      "loss": 2.307,
      "step": 48
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.000190719696969697,
      "loss": 2.2929,
      "step": 49
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019053030303030303,
      "loss": 2.2873,
      "step": 50
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001903409090909091,
      "loss": 2.3098,
      "step": 51
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019015151515151517,
      "loss": 2.3129,
      "step": 52
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001899621212121212,
      "loss": 2.3038,
      "step": 53
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00018977272727272728,
      "loss": 2.286,
      "step": 54
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018958333333333332,
      "loss": 2.3388,
      "step": 55
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018939393939393942,
      "loss": 2.3193,
      "step": 56
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018920454545454546,
      "loss": 2.3136,
      "step": 57
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00018901515151515153,
      "loss": 2.3141,
      "step": 58
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00018882575757575757,
      "loss": 2.3646,
      "step": 59
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00018863636363636364,
      "loss": 2.3318,
      "step": 60
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001884469696969697,
      "loss": 2.2977,
      "step": 61
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018825757575757575,
      "loss": 2.2764,
      "step": 62
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018806818181818182,
      "loss": 2.3095,
      "step": 63
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001878787878787879,
      "loss": 2.252,
      "step": 64
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00018768939393939396,
      "loss": 2.2786,
      "step": 65
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001875,
      "loss": 2.2789,
      "step": 66
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018731060606060607,
      "loss": 2.2841,
      "step": 67
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00018712121212121212,
      "loss": 2.3436,
      "step": 68
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018693181818181818,
      "loss": 2.2956,
      "step": 69
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018674242424242425,
      "loss": 2.2353,
      "step": 70
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001865530303030303,
      "loss": 2.2772,
      "step": 71
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00018636363636363636,
      "loss": 2.2496,
      "step": 72
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018617424242424243,
      "loss": 2.2477,
      "step": 73
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001859848484848485,
      "loss": 2.2791,
      "step": 74
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00018579545454545454,
      "loss": 2.2799,
      "step": 75
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018560606060606061,
      "loss": 2.3132,
      "step": 76
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018541666666666668,
      "loss": 2.2542,
      "step": 77
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018522727272727273,
      "loss": 2.2609,
      "step": 78
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0001850378787878788,
      "loss": 2.2819,
      "step": 79
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018484848484848484,
      "loss": 2.2844,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018465909090909093,
      "loss": 2.2542,
      "step": 81
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00018446969696969697,
      "loss": 2.2603,
      "step": 82
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018428030303030304,
      "loss": 2.2832,
      "step": 83
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018409090909090909,
      "loss": 2.2869,
      "step": 84
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018390151515151518,
      "loss": 2.2646,
      "step": 85
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018371212121212122,
      "loss": 2.2698,
      "step": 86
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018352272727272727,
      "loss": 2.2757,
      "step": 87
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00018333333333333334,
      "loss": 2.2544,
      "step": 88
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.0001831439393939394,
      "loss": 2.2678,
      "step": 89
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018295454545454547,
      "loss": 2.2778,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018276515151515152,
      "loss": 2.2027,
      "step": 91
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018257575757575758,
      "loss": 2.2167,
      "step": 92
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00018238636363636365,
      "loss": 2.2602,
      "step": 93
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018219696969696972,
      "loss": 2.2736,
      "step": 94
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018200757575757577,
      "loss": 2.2443,
      "step": 95
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00018181818181818183,
      "loss": 2.2299,
      "step": 96
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0001816287878787879,
      "loss": 2.2644,
      "step": 97
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018143939393939395,
      "loss": 2.259,
      "step": 98
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018125000000000001,
      "loss": 2.2567,
      "step": 99
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00018106060606060606,
      "loss": 2.2599,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018087121212121213,
      "loss": 2.2091,
      "step": 101
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0001806818181818182,
      "loss": 2.2312,
      "step": 102
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00018049242424242426,
      "loss": 2.1869,
      "step": 103
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001803030303030303,
      "loss": 2.2023,
      "step": 104
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00018011363636363638,
      "loss": 2.2132,
      "step": 105
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00017992424242424244,
      "loss": 2.2612,
      "step": 106
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.0001797348484848485,
      "loss": 2.2109,
      "step": 107
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00017954545454545456,
      "loss": 2.215,
      "step": 108
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001793560606060606,
      "loss": 2.2114,
      "step": 109
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001791666666666667,
      "loss": 2.2203,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00017897727272727274,
      "loss": 2.2594,
      "step": 111
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0001787878787878788,
      "loss": 2.2001,
      "step": 112
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00017859848484848485,
      "loss": 2.2046,
      "step": 113
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00017840909090909092,
      "loss": 2.1907,
      "step": 114
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017821969696969699,
      "loss": 2.2539,
      "step": 115
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017803030303030303,
      "loss": 2.2335,
      "step": 116
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0001778409090909091,
      "loss": 2.2171,
      "step": 117
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00017765151515151517,
      "loss": 2.2278,
      "step": 118
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017746212121212123,
      "loss": 2.231,
      "step": 119
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017727272727272728,
      "loss": 2.2141,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00017708333333333335,
      "loss": 2.2432,
      "step": 121
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017689393939393942,
      "loss": 2.2266,
      "step": 122
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017670454545454546,
      "loss": 2.1929,
      "step": 123
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017651515151515153,
      "loss": 2.2077,
      "step": 124
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00017632575757575757,
      "loss": 2.2133,
      "step": 125
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017613636363636366,
      "loss": 2.2251,
      "step": 126
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0001759469696969697,
      "loss": 2.2265,
      "step": 127
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00017575757575757578,
      "loss": 2.2186,
      "step": 128
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017556818181818182,
      "loss": 2.1925,
      "step": 129
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0001753787878787879,
      "loss": 2.1956,
      "step": 130
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00017518939393939396,
      "loss": 2.2459,
      "step": 131
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.000175,
      "loss": 2.22,
      "step": 132
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00017481060606060607,
      "loss": 2.2143,
      "step": 133
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001746212121212121,
      "loss": 2.2359,
      "step": 134
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0001744318181818182,
      "loss": 2.2058,
      "step": 135
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017424242424242425,
      "loss": 2.2307,
      "step": 136
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017405303030303032,
      "loss": 2.2062,
      "step": 137
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017386363636363636,
      "loss": 2.1796,
      "step": 138
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00017367424242424243,
      "loss": 2.2054,
      "step": 139
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001734848484848485,
      "loss": 2.1651,
      "step": 140
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00017329545454545454,
      "loss": 2.2159,
      "step": 141
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001731060606060606,
      "loss": 2.1988,
      "step": 142
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017291666666666668,
      "loss": 2.1676,
      "step": 143
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017272727272727275,
      "loss": 2.1725,
      "step": 144
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001725378787878788,
      "loss": 2.2205,
      "step": 145
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00017234848484848486,
      "loss": 2.1486,
      "step": 146
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017215909090909093,
      "loss": 2.147,
      "step": 147
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017196969696969697,
      "loss": 2.1651,
      "step": 148
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00017178030303030304,
      "loss": 2.1983,
      "step": 149
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017159090909090908,
      "loss": 2.1778,
      "step": 150
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017140151515151518,
      "loss": 2.1631,
      "step": 151
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00017121212121212122,
      "loss": 2.1442,
      "step": 152
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.0001710227272727273,
      "loss": 2.1397,
      "step": 153
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017083333333333333,
      "loss": 2.1697,
      "step": 154
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0001706439393939394,
      "loss": 2.1451,
      "step": 155
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00017045454545454547,
      "loss": 2.1789,
      "step": 156
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001702651515151515,
      "loss": 2.1037,
      "step": 157
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00017007575757575758,
      "loss": 2.1698,
      "step": 158
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00016988636363636365,
      "loss": 2.1538,
      "step": 159
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00016969696969696972,
      "loss": 2.2015,
      "step": 160
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00016950757575757576,
      "loss": 2.179,
      "step": 161
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00016931818181818183,
      "loss": 2.1766,
      "step": 162
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0001691287878787879,
      "loss": 2.1646,
      "step": 163
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016893939393939394,
      "loss": 2.1694,
      "step": 164
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016875,
      "loss": 2.1562,
      "step": 165
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016856060606060605,
      "loss": 2.1551,
      "step": 166
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00016837121212121212,
      "loss": 2.1652,
      "step": 167
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001681818181818182,
      "loss": 2.1594,
      "step": 168
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00016799242424242426,
      "loss": 2.1674,
      "step": 169
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0001678030303030303,
      "loss": 2.1378,
      "step": 170
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016761363636363637,
      "loss": 2.1447,
      "step": 171
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016742424242424244,
      "loss": 2.1451,
      "step": 172
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016723484848484848,
      "loss": 2.1336,
      "step": 173
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00016704545454545455,
      "loss": 2.1231,
      "step": 174
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001668560606060606,
      "loss": 2.1143,
      "step": 175
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001666666666666667,
      "loss": 2.1316,
      "step": 176
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00016647727272727273,
      "loss": 2.1281,
      "step": 177
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0001662878787878788,
      "loss": 2.136,
      "step": 178
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016609848484848484,
      "loss": 2.1279,
      "step": 179
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016590909090909094,
      "loss": 2.1421,
      "step": 180
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00016571969696969698,
      "loss": 2.1541,
      "step": 181
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016553030303030305,
      "loss": 2.1293,
      "step": 182
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0001653409090909091,
      "loss": 2.1294,
      "step": 183
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016515151515151516,
      "loss": 2.1459,
      "step": 184
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00016496212121212123,
      "loss": 2.1113,
      "step": 185
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016477272727272727,
      "loss": 2.1394,
      "step": 186
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.00016458333333333334,
      "loss": 2.1321,
      "step": 187
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001643939393939394,
      "loss": 2.148,
      "step": 188
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016420454545454548,
      "loss": 2.1631,
      "step": 189
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016401515151515152,
      "loss": 2.1276,
      "step": 190
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0001638257575757576,
      "loss": 2.0706,
      "step": 191
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00016363636363636366,
      "loss": 2.127,
      "step": 192
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001634469696969697,
      "loss": 2.1449,
      "step": 193
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00016325757575757577,
      "loss": 2.1204,
      "step": 194
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001630681818181818,
      "loss": 2.0904,
      "step": 195
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001628787878787879,
      "loss": 2.1129,
      "step": 196
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016268939393939395,
      "loss": 2.1036,
      "step": 197
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016250000000000002,
      "loss": 2.1509,
      "step": 198
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00016231060606060606,
      "loss": 2.1239,
      "step": 199
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00016212121212121213,
      "loss": 2.145,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 1056,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 5.0683926177970176e+17,
  "trial_name": null,
  "trial_params": null
}