diff --git "a/trainer_state.json" "b/trainer_state.json" new file mode 100644--- /dev/null +++ "b/trainer_state.json" @@ -0,0 +1,9492 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 2.9991668980838657, + "eval_steps": 500, + "global_step": 1350, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0022216051096917524, + "grad_norm": 0.06047070026397705, + "learning_rate": 2.2222222222222223e-05, + "loss": 10.6079, + "step": 1 + }, + { + "epoch": 0.004443210219383505, + "grad_norm": 0.04731635004281998, + "learning_rate": 4.4444444444444447e-05, + "loss": 10.247, + "step": 2 + }, + { + "epoch": 0.006664815329075257, + "grad_norm": 0.049777183681726456, + "learning_rate": 6.666666666666667e-05, + "loss": 10.4556, + "step": 3 + }, + { + "epoch": 0.00888642043876701, + "grad_norm": 0.04873029887676239, + "learning_rate": 8.888888888888889e-05, + "loss": 10.7528, + "step": 4 + }, + { + "epoch": 0.011108025548458762, + "grad_norm": 0.05402935668826103, + "learning_rate": 0.0001111111111111111, + "loss": 10.7387, + "step": 5 + }, + { + "epoch": 0.013329630658150514, + "grad_norm": 0.050039034336805344, + "learning_rate": 0.00013333333333333334, + "loss": 10.3825, + "step": 6 + }, + { + "epoch": 0.015551235767842266, + "grad_norm": 0.05475683882832527, + "learning_rate": 0.00015555555555555556, + "loss": 10.7209, + "step": 7 + }, + { + "epoch": 0.01777284087753402, + "grad_norm": 0.05116181820631027, + "learning_rate": 0.00017777777777777779, + "loss": 10.5757, + "step": 8 + }, + { + "epoch": 0.01999444598722577, + "grad_norm": 0.05796043574810028, + "learning_rate": 0.0002, + "loss": 10.7621, + "step": 9 + }, + { + "epoch": 0.022216051096917523, + "grad_norm": 0.050162676721811295, + "learning_rate": 0.0002222222222222222, + "loss": 10.6925, + "step": 10 + }, + { + "epoch": 0.024437656206609277, + "grad_norm": 0.050715889781713486, + "learning_rate": 0.0002444444444444445, + "loss": 10.5473, + "step": 11 + }, + { + "epoch": 0.026659261316301027, + "grad_norm": 0.0544884130358696, + "learning_rate": 0.0002666666666666667, + "loss": 10.5229, + "step": 12 + }, + { + "epoch": 0.02888086642599278, + "grad_norm": 0.05708073824644089, + "learning_rate": 0.0002888888888888889, + "loss": 10.4162, + "step": 13 + }, + { + "epoch": 0.03110247153568453, + "grad_norm": 0.05594116821885109, + "learning_rate": 0.0003111111111111111, + "loss": 10.8703, + "step": 14 + }, + { + "epoch": 0.03332407664537628, + "grad_norm": 0.05033649876713753, + "learning_rate": 0.0003333333333333333, + "loss": 10.3323, + "step": 15 + }, + { + "epoch": 0.03554568175506804, + "grad_norm": 0.053412653505802155, + "learning_rate": 0.00035555555555555557, + "loss": 10.4398, + "step": 16 + }, + { + "epoch": 0.03776728686475979, + "grad_norm": 0.054381825029850006, + "learning_rate": 0.00037777777777777777, + "loss": 10.3598, + "step": 17 + }, + { + "epoch": 0.03998889197445154, + "grad_norm": 0.055913932621479034, + "learning_rate": 0.0004, + "loss": 10.3756, + "step": 18 + }, + { + "epoch": 0.042210497084143296, + "grad_norm": 0.056334175169467926, + "learning_rate": 0.00042222222222222227, + "loss": 10.7314, + "step": 19 + }, + { + "epoch": 0.044432102193835046, + "grad_norm": 0.05052949860692024, + "learning_rate": 0.0004444444444444444, + "loss": 10.6529, + "step": 20 + }, + { + "epoch": 0.0466537073035268, + "grad_norm": 0.05472550168633461, + "learning_rate": 0.00046666666666666666, + "loss": 10.4808, + "step": 21 + }, + { + 
"epoch": 0.048875312413218554, + "grad_norm": 0.04639170691370964, + "learning_rate": 0.000488888888888889, + "loss": 10.4715, + "step": 22 + }, + { + "epoch": 0.051096917522910304, + "grad_norm": 0.05662548914551735, + "learning_rate": 0.0005111111111111112, + "loss": 10.3969, + "step": 23 + }, + { + "epoch": 0.053318522632602054, + "grad_norm": 0.04989445582032204, + "learning_rate": 0.0005333333333333334, + "loss": 10.3438, + "step": 24 + }, + { + "epoch": 0.055540127742293804, + "grad_norm": 0.05659613013267517, + "learning_rate": 0.0005555555555555556, + "loss": 10.2444, + "step": 25 + }, + { + "epoch": 0.05776173285198556, + "grad_norm": 0.05034107714891434, + "learning_rate": 0.0005777777777777778, + "loss": 10.47, + "step": 26 + }, + { + "epoch": 0.05998333796167731, + "grad_norm": 0.053235191851854324, + "learning_rate": 0.0006000000000000001, + "loss": 10.4771, + "step": 27 + }, + { + "epoch": 0.06220494307136906, + "grad_norm": 0.052242521196603775, + "learning_rate": 0.0006222222222222223, + "loss": 10.6162, + "step": 28 + }, + { + "epoch": 0.06442654818106082, + "grad_norm": 0.051469966769218445, + "learning_rate": 0.0006444444444444444, + "loss": 10.6046, + "step": 29 + }, + { + "epoch": 0.06664815329075256, + "grad_norm": 0.048590026795864105, + "learning_rate": 0.0006666666666666666, + "loss": 10.5241, + "step": 30 + }, + { + "epoch": 0.06886975840044432, + "grad_norm": 0.050730764865875244, + "learning_rate": 0.000688888888888889, + "loss": 10.3362, + "step": 31 + }, + { + "epoch": 0.07109136351013608, + "grad_norm": 0.05845819041132927, + "learning_rate": 0.0007111111111111111, + "loss": 10.4505, + "step": 32 + }, + { + "epoch": 0.07331296861982782, + "grad_norm": 0.053127389401197433, + "learning_rate": 0.0007333333333333333, + "loss": 10.6673, + "step": 33 + }, + { + "epoch": 0.07553457372951958, + "grad_norm": 0.05221354588866234, + "learning_rate": 0.0007555555555555555, + "loss": 10.4441, + "step": 34 + }, + { + "epoch": 0.07775617883921133, + "grad_norm": 0.052416905760765076, + "learning_rate": 0.0007777777777777777, + "loss": 10.5406, + "step": 35 + }, + { + "epoch": 0.07997778394890308, + "grad_norm": 0.055885300040245056, + "learning_rate": 0.0008, + "loss": 10.6719, + "step": 36 + }, + { + "epoch": 0.08219938905859484, + "grad_norm": 0.05125787854194641, + "learning_rate": 0.0008222222222222222, + "loss": 10.3831, + "step": 37 + }, + { + "epoch": 0.08442099416828659, + "grad_norm": 0.05321040004491806, + "learning_rate": 0.0008444444444444445, + "loss": 10.5362, + "step": 38 + }, + { + "epoch": 0.08664259927797834, + "grad_norm": 0.048330310732126236, + "learning_rate": 0.0008666666666666666, + "loss": 10.618, + "step": 39 + }, + { + "epoch": 0.08886420438767009, + "grad_norm": 0.05690504238009453, + "learning_rate": 0.0008888888888888888, + "loss": 10.3795, + "step": 40 + }, + { + "epoch": 0.09108580949736185, + "grad_norm": 0.050180643796920776, + "learning_rate": 0.0009111111111111111, + "loss": 10.5554, + "step": 41 + }, + { + "epoch": 0.0933074146070536, + "grad_norm": 0.05029109865427017, + "learning_rate": 0.0009333333333333333, + "loss": 10.3326, + "step": 42 + }, + { + "epoch": 0.09552901971674535, + "grad_norm": 0.05197408050298691, + "learning_rate": 0.0009555555555555556, + "loss": 10.2956, + "step": 43 + }, + { + "epoch": 0.09775062482643711, + "grad_norm": 0.0569787472486496, + "learning_rate": 0.000977777777777778, + "loss": 10.3524, + "step": 44 + }, + { + "epoch": 0.09997222993612885, + "grad_norm": 0.051295869052410126, + "learning_rate": 
0.001, + "loss": 10.6064, + "step": 45 + }, + { + "epoch": 0.10219383504582061, + "grad_norm": 0.043966952711343765, + "learning_rate": 0.0010222222222222223, + "loss": 10.4717, + "step": 46 + }, + { + "epoch": 0.10441544015551235, + "grad_norm": 0.05129879713058472, + "learning_rate": 0.0010444444444444444, + "loss": 10.3595, + "step": 47 + }, + { + "epoch": 0.10663704526520411, + "grad_norm": 0.053009241819381714, + "learning_rate": 0.0010666666666666667, + "loss": 10.5078, + "step": 48 + }, + { + "epoch": 0.10885865037489587, + "grad_norm": 0.04631511867046356, + "learning_rate": 0.001088888888888889, + "loss": 10.456, + "step": 49 + }, + { + "epoch": 0.11108025548458761, + "grad_norm": 0.055996429175138474, + "learning_rate": 0.0011111111111111111, + "loss": 10.1887, + "step": 50 + }, + { + "epoch": 0.11330186059427937, + "grad_norm": 0.0516912117600441, + "learning_rate": 0.0011333333333333334, + "loss": 10.0742, + "step": 51 + }, + { + "epoch": 0.11552346570397112, + "grad_norm": 0.053418293595314026, + "learning_rate": 0.0011555555555555555, + "loss": 10.4041, + "step": 52 + }, + { + "epoch": 0.11774507081366287, + "grad_norm": 0.05349532887339592, + "learning_rate": 0.0011777777777777778, + "loss": 10.1057, + "step": 53 + }, + { + "epoch": 0.11996667592335462, + "grad_norm": 0.05413784086704254, + "learning_rate": 0.0012000000000000001, + "loss": 10.2958, + "step": 54 + }, + { + "epoch": 0.12218828103304638, + "grad_norm": 0.04732006415724754, + "learning_rate": 0.0012222222222222222, + "loss": 10.229, + "step": 55 + }, + { + "epoch": 0.12440988614273812, + "grad_norm": 0.06012426316738129, + "learning_rate": 0.0012444444444444445, + "loss": 10.0901, + "step": 56 + }, + { + "epoch": 0.12663149125242987, + "grad_norm": 0.05074010044336319, + "learning_rate": 0.0012666666666666666, + "loss": 10.273, + "step": 57 + }, + { + "epoch": 0.12885309636212164, + "grad_norm": 0.048709437251091, + "learning_rate": 0.001288888888888889, + "loss": 10.1621, + "step": 58 + }, + { + "epoch": 0.13107470147181338, + "grad_norm": 0.05393685773015022, + "learning_rate": 0.0013111111111111112, + "loss": 10.0042, + "step": 59 + }, + { + "epoch": 0.13329630658150513, + "grad_norm": 0.04674612730741501, + "learning_rate": 0.0013333333333333333, + "loss": 10.2969, + "step": 60 + }, + { + "epoch": 0.1355179116911969, + "grad_norm": 0.05694056674838066, + "learning_rate": 0.0013555555555555556, + "loss": 10.151, + "step": 61 + }, + { + "epoch": 0.13773951680088864, + "grad_norm": 0.057547781616449356, + "learning_rate": 0.001377777777777778, + "loss": 10.5158, + "step": 62 + }, + { + "epoch": 0.13996112191058038, + "grad_norm": 0.05202538147568703, + "learning_rate": 0.0014, + "loss": 10.0441, + "step": 63 + }, + { + "epoch": 0.14218272702027215, + "grad_norm": 0.05227857083082199, + "learning_rate": 0.0014222222222222223, + "loss": 10.0009, + "step": 64 + }, + { + "epoch": 0.1444043321299639, + "grad_norm": 0.05450652912259102, + "learning_rate": 0.0014444444444444444, + "loss": 10.1787, + "step": 65 + }, + { + "epoch": 0.14662593723965564, + "grad_norm": 0.04978035390377045, + "learning_rate": 0.0014666666666666667, + "loss": 10.6574, + "step": 66 + }, + { + "epoch": 0.1488475423493474, + "grad_norm": 0.04803209751844406, + "learning_rate": 0.001488888888888889, + "loss": 10.1993, + "step": 67 + }, + { + "epoch": 0.15106914745903915, + "grad_norm": 0.04095118120312691, + "learning_rate": 0.001511111111111111, + "loss": 9.9589, + "step": 68 + }, + { + "epoch": 0.1532907525687309, + "grad_norm": 
0.049227792769670486, + "learning_rate": 0.0015333333333333332, + "loss": 10.1613, + "step": 69 + }, + { + "epoch": 0.15551235767842267, + "grad_norm": 0.056645046919584274, + "learning_rate": 0.0015555555555555555, + "loss": 10.157, + "step": 70 + }, + { + "epoch": 0.1577339627881144, + "grad_norm": 0.04908820986747742, + "learning_rate": 0.0015777777777777778, + "loss": 10.3338, + "step": 71 + }, + { + "epoch": 0.15995556789780616, + "grad_norm": 0.048528026789426804, + "learning_rate": 0.0016, + "loss": 10.1877, + "step": 72 + }, + { + "epoch": 0.16217717300749793, + "grad_norm": 0.0497007817029953, + "learning_rate": 0.0016222222222222222, + "loss": 10.0367, + "step": 73 + }, + { + "epoch": 0.16439877811718967, + "grad_norm": 0.05009305849671364, + "learning_rate": 0.0016444444444444445, + "loss": 9.9939, + "step": 74 + }, + { + "epoch": 0.1666203832268814, + "grad_norm": 0.04887579008936882, + "learning_rate": 0.0016666666666666668, + "loss": 10.2447, + "step": 75 + }, + { + "epoch": 0.16884198833657318, + "grad_norm": 0.04899114370346069, + "learning_rate": 0.001688888888888889, + "loss": 10.0718, + "step": 76 + }, + { + "epoch": 0.17106359344626493, + "grad_norm": 0.05017256736755371, + "learning_rate": 0.0017111111111111114, + "loss": 9.7071, + "step": 77 + }, + { + "epoch": 0.17328519855595667, + "grad_norm": 0.05062386393547058, + "learning_rate": 0.0017333333333333333, + "loss": 10.3495, + "step": 78 + }, + { + "epoch": 0.17550680366564844, + "grad_norm": 0.054166313260793686, + "learning_rate": 0.0017555555555555556, + "loss": 9.8393, + "step": 79 + }, + { + "epoch": 0.17772840877534019, + "grad_norm": 0.04704143479466438, + "learning_rate": 0.0017777777777777776, + "loss": 9.8577, + "step": 80 + }, + { + "epoch": 0.17995001388503193, + "grad_norm": 0.0470828153192997, + "learning_rate": 0.0018, + "loss": 10.2248, + "step": 81 + }, + { + "epoch": 0.1821716189947237, + "grad_norm": 0.046098653227090836, + "learning_rate": 0.0018222222222222223, + "loss": 10.0535, + "step": 82 + }, + { + "epoch": 0.18439322410441544, + "grad_norm": 0.0534447506070137, + "learning_rate": 0.0018444444444444446, + "loss": 9.9007, + "step": 83 + }, + { + "epoch": 0.1866148292141072, + "grad_norm": 0.05775776132941246, + "learning_rate": 0.0018666666666666666, + "loss": 10.0096, + "step": 84 + }, + { + "epoch": 0.18883643432379896, + "grad_norm": 0.0538940355181694, + "learning_rate": 0.001888888888888889, + "loss": 9.8869, + "step": 85 + }, + { + "epoch": 0.1910580394334907, + "grad_norm": 0.055318545550107956, + "learning_rate": 0.0019111111111111113, + "loss": 10.0082, + "step": 86 + }, + { + "epoch": 0.19327964454318244, + "grad_norm": 0.04999914392828941, + "learning_rate": 0.0019333333333333336, + "loss": 9.9907, + "step": 87 + }, + { + "epoch": 0.19550124965287421, + "grad_norm": 0.04839245602488518, + "learning_rate": 0.001955555555555556, + "loss": 9.6986, + "step": 88 + }, + { + "epoch": 0.19772285476256596, + "grad_norm": 0.051004353910684586, + "learning_rate": 0.0019777777777777775, + "loss": 10.0626, + "step": 89 + }, + { + "epoch": 0.1999444598722577, + "grad_norm": 0.050714798271656036, + "learning_rate": 0.002, + "loss": 9.4952, + "step": 90 + }, + { + "epoch": 0.20216606498194944, + "grad_norm": 0.046029288321733475, + "learning_rate": 0.002022222222222222, + "loss": 10.0677, + "step": 91 + }, + { + "epoch": 0.20438767009164122, + "grad_norm": 0.052206166088581085, + "learning_rate": 0.0020444444444444447, + "loss": 9.7142, + "step": 92 + }, + { + "epoch": 0.20660927520133296, + 
"grad_norm": 0.05498271808028221, + "learning_rate": 0.0020666666666666667, + "loss": 10.1042, + "step": 93 + }, + { + "epoch": 0.2088308803110247, + "grad_norm": 0.05288155749440193, + "learning_rate": 0.002088888888888889, + "loss": 9.6725, + "step": 94 + }, + { + "epoch": 0.21105248542071647, + "grad_norm": 0.04739094898104668, + "learning_rate": 0.0021111111111111113, + "loss": 9.8394, + "step": 95 + }, + { + "epoch": 0.21327409053040822, + "grad_norm": 0.05595097690820694, + "learning_rate": 0.0021333333333333334, + "loss": 9.8368, + "step": 96 + }, + { + "epoch": 0.21549569564009996, + "grad_norm": 0.052203740924596786, + "learning_rate": 0.0021555555555555555, + "loss": 9.5522, + "step": 97 + }, + { + "epoch": 0.21771730074979173, + "grad_norm": 0.04803293198347092, + "learning_rate": 0.002177777777777778, + "loss": 9.7696, + "step": 98 + }, + { + "epoch": 0.21993890585948347, + "grad_norm": 0.054248202592134476, + "learning_rate": 0.0021999999999999997, + "loss": 9.7664, + "step": 99 + }, + { + "epoch": 0.22216051096917522, + "grad_norm": 0.04804198816418648, + "learning_rate": 0.0022222222222222222, + "loss": 9.6602, + "step": 100 + }, + { + "epoch": 0.224382116078867, + "grad_norm": 0.0431906133890152, + "learning_rate": 0.0022444444444444443, + "loss": 9.4522, + "step": 101 + }, + { + "epoch": 0.22660372118855873, + "grad_norm": 0.05638457089662552, + "learning_rate": 0.002266666666666667, + "loss": 9.5481, + "step": 102 + }, + { + "epoch": 0.22882532629825048, + "grad_norm": 0.053331535309553146, + "learning_rate": 0.002288888888888889, + "loss": 9.6493, + "step": 103 + }, + { + "epoch": 0.23104693140794225, + "grad_norm": 0.05476195365190506, + "learning_rate": 0.002311111111111111, + "loss": 9.536, + "step": 104 + }, + { + "epoch": 0.233268536517634, + "grad_norm": 0.056775737553834915, + "learning_rate": 0.0023333333333333335, + "loss": 9.3876, + "step": 105 + }, + { + "epoch": 0.23549014162732573, + "grad_norm": 0.04439986124634743, + "learning_rate": 0.0023555555555555556, + "loss": 9.671, + "step": 106 + }, + { + "epoch": 0.2377117467370175, + "grad_norm": 0.04525638371706009, + "learning_rate": 0.002377777777777778, + "loss": 9.5923, + "step": 107 + }, + { + "epoch": 0.23993335184670925, + "grad_norm": 0.04767855256795883, + "learning_rate": 0.0024000000000000002, + "loss": 9.6033, + "step": 108 + }, + { + "epoch": 0.242154956956401, + "grad_norm": 0.047220680862665176, + "learning_rate": 0.0024222222222222223, + "loss": 9.5502, + "step": 109 + }, + { + "epoch": 0.24437656206609276, + "grad_norm": 0.04153040051460266, + "learning_rate": 0.0024444444444444444, + "loss": 9.5775, + "step": 110 + }, + { + "epoch": 0.2465981671757845, + "grad_norm": 0.050917111337184906, + "learning_rate": 0.0024666666666666665, + "loss": 9.5931, + "step": 111 + }, + { + "epoch": 0.24881977228547625, + "grad_norm": 0.0504818893969059, + "learning_rate": 0.002488888888888889, + "loss": 9.4467, + "step": 112 + }, + { + "epoch": 0.251041377395168, + "grad_norm": 0.0438624769449234, + "learning_rate": 0.002511111111111111, + "loss": 9.2971, + "step": 113 + }, + { + "epoch": 0.25326298250485973, + "grad_norm": 0.04707180708646774, + "learning_rate": 0.002533333333333333, + "loss": 9.3382, + "step": 114 + }, + { + "epoch": 0.2554845876145515, + "grad_norm": 0.049067236483097076, + "learning_rate": 0.0025555555555555557, + "loss": 9.4641, + "step": 115 + }, + { + "epoch": 0.2577061927242433, + "grad_norm": 0.046793680638074875, + "learning_rate": 0.002577777777777778, + "loss": 9.6351, + "step": 116 
+ }, + { + "epoch": 0.259927797833935, + "grad_norm": 0.048384182155132294, + "learning_rate": 0.0026000000000000003, + "loss": 9.3845, + "step": 117 + }, + { + "epoch": 0.26214940294362676, + "grad_norm": 0.051988836377859116, + "learning_rate": 0.0026222222222222224, + "loss": 9.5515, + "step": 118 + }, + { + "epoch": 0.26437100805331853, + "grad_norm": 0.04786220192909241, + "learning_rate": 0.0026444444444444445, + "loss": 9.3949, + "step": 119 + }, + { + "epoch": 0.26659261316301025, + "grad_norm": 0.05733378604054451, + "learning_rate": 0.0026666666666666666, + "loss": 9.0427, + "step": 120 + }, + { + "epoch": 0.268814218272702, + "grad_norm": 0.05151943117380142, + "learning_rate": 0.0026888888888888887, + "loss": 9.4388, + "step": 121 + }, + { + "epoch": 0.2710358233823938, + "grad_norm": 0.04847846180200577, + "learning_rate": 0.002711111111111111, + "loss": 9.3863, + "step": 122 + }, + { + "epoch": 0.2732574284920855, + "grad_norm": 0.052508387714624405, + "learning_rate": 0.0027333333333333333, + "loss": 9.2321, + "step": 123 + }, + { + "epoch": 0.2754790336017773, + "grad_norm": 0.04624883830547333, + "learning_rate": 0.002755555555555556, + "loss": 9.5099, + "step": 124 + }, + { + "epoch": 0.27770063871146905, + "grad_norm": 0.05091821402311325, + "learning_rate": 0.002777777777777778, + "loss": 9.1828, + "step": 125 + }, + { + "epoch": 0.27992224382116077, + "grad_norm": 0.05264339596033096, + "learning_rate": 0.0028, + "loss": 9.055, + "step": 126 + }, + { + "epoch": 0.28214384893085254, + "grad_norm": 0.05629086494445801, + "learning_rate": 0.0028222222222222225, + "loss": 8.9799, + "step": 127 + }, + { + "epoch": 0.2843654540405443, + "grad_norm": 0.051374807953834534, + "learning_rate": 0.0028444444444444446, + "loss": 9.0618, + "step": 128 + }, + { + "epoch": 0.286587059150236, + "grad_norm": 0.048084281384944916, + "learning_rate": 0.0028666666666666667, + "loss": 9.1234, + "step": 129 + }, + { + "epoch": 0.2888086642599278, + "grad_norm": 0.05345724895596504, + "learning_rate": 0.0028888888888888888, + "loss": 8.9961, + "step": 130 + }, + { + "epoch": 0.29103026936961957, + "grad_norm": 0.04753989726305008, + "learning_rate": 0.002911111111111111, + "loss": 9.2071, + "step": 131 + }, + { + "epoch": 0.2932518744793113, + "grad_norm": 0.042281683534383774, + "learning_rate": 0.0029333333333333334, + "loss": 9.0905, + "step": 132 + }, + { + "epoch": 0.29547347958900305, + "grad_norm": 0.04851217195391655, + "learning_rate": 0.0029555555555555555, + "loss": 8.7241, + "step": 133 + }, + { + "epoch": 0.2976950846986948, + "grad_norm": 0.04813491553068161, + "learning_rate": 0.002977777777777778, + "loss": 9.0812, + "step": 134 + }, + { + "epoch": 0.29991668980838654, + "grad_norm": 0.052904680371284485, + "learning_rate": 0.003, + "loss": 8.6711, + "step": 135 + }, + { + "epoch": 0.3021382949180783, + "grad_norm": 0.05275777354836464, + "learning_rate": 0.0029975308641975312, + "loss": 8.6604, + "step": 136 + }, + { + "epoch": 0.3043599000277701, + "grad_norm": 0.04787842929363251, + "learning_rate": 0.0029950617283950615, + "loss": 8.8263, + "step": 137 + }, + { + "epoch": 0.3065815051374618, + "grad_norm": 0.05127862095832825, + "learning_rate": 0.0029925925925925927, + "loss": 8.8265, + "step": 138 + }, + { + "epoch": 0.30880311024715357, + "grad_norm": 0.04686402529478073, + "learning_rate": 0.0029901234567901234, + "loss": 9.059, + "step": 139 + }, + { + "epoch": 0.31102471535684534, + "grad_norm": 0.04877879098057747, + "learning_rate": 0.0029876543209876546, + "loss": 
8.9707, + "step": 140 + }, + { + "epoch": 0.31324632046653705, + "grad_norm": 0.0502287782728672, + "learning_rate": 0.002985185185185185, + "loss": 8.7495, + "step": 141 + }, + { + "epoch": 0.3154679255762288, + "grad_norm": 0.044707659631967545, + "learning_rate": 0.002982716049382716, + "loss": 8.9677, + "step": 142 + }, + { + "epoch": 0.3176895306859206, + "grad_norm": 0.052578676491975784, + "learning_rate": 0.0029802469135802472, + "loss": 8.6041, + "step": 143 + }, + { + "epoch": 0.3199111357956123, + "grad_norm": 0.04367254674434662, + "learning_rate": 0.002977777777777778, + "loss": 8.907, + "step": 144 + }, + { + "epoch": 0.3221327409053041, + "grad_norm": 0.048488833010196686, + "learning_rate": 0.0029753086419753087, + "loss": 8.5004, + "step": 145 + }, + { + "epoch": 0.32435434601499585, + "grad_norm": 0.04960470646619797, + "learning_rate": 0.0029728395061728394, + "loss": 8.5826, + "step": 146 + }, + { + "epoch": 0.32657595112468757, + "grad_norm": 0.05014924705028534, + "learning_rate": 0.0029703703703703706, + "loss": 8.6261, + "step": 147 + }, + { + "epoch": 0.32879755623437934, + "grad_norm": 0.04689573496580124, + "learning_rate": 0.0029679012345679013, + "loss": 8.7356, + "step": 148 + }, + { + "epoch": 0.3310191613440711, + "grad_norm": 0.051323119550943375, + "learning_rate": 0.002965432098765432, + "loss": 8.7201, + "step": 149 + }, + { + "epoch": 0.3332407664537628, + "grad_norm": 0.047252483665943146, + "learning_rate": 0.002962962962962963, + "loss": 8.8438, + "step": 150 + }, + { + "epoch": 0.3354623715634546, + "grad_norm": 0.04643348976969719, + "learning_rate": 0.002960493827160494, + "loss": 8.6791, + "step": 151 + }, + { + "epoch": 0.33768397667314637, + "grad_norm": 0.05457242578268051, + "learning_rate": 0.0029580246913580247, + "loss": 8.4789, + "step": 152 + }, + { + "epoch": 0.3399055817828381, + "grad_norm": 0.050342462956905365, + "learning_rate": 0.0029555555555555555, + "loss": 8.6765, + "step": 153 + }, + { + "epoch": 0.34212718689252986, + "grad_norm": 0.047568775713443756, + "learning_rate": 0.0029530864197530866, + "loss": 8.4614, + "step": 154 + }, + { + "epoch": 0.3443487920022216, + "grad_norm": 0.049684301018714905, + "learning_rate": 0.0029506172839506174, + "loss": 8.7243, + "step": 155 + }, + { + "epoch": 0.34657039711191334, + "grad_norm": 0.04956204071640968, + "learning_rate": 0.002948148148148148, + "loss": 8.3286, + "step": 156 + }, + { + "epoch": 0.3487920022216051, + "grad_norm": 0.05027199909090996, + "learning_rate": 0.002945679012345679, + "loss": 8.4557, + "step": 157 + }, + { + "epoch": 0.3510136073312969, + "grad_norm": 0.049550533294677734, + "learning_rate": 0.00294320987654321, + "loss": 8.5551, + "step": 158 + }, + { + "epoch": 0.3532352124409886, + "grad_norm": 0.0448816642165184, + "learning_rate": 0.0029407407407407407, + "loss": 8.5497, + "step": 159 + }, + { + "epoch": 0.35545681755068037, + "grad_norm": 0.0545976497232914, + "learning_rate": 0.0029382716049382715, + "loss": 8.3274, + "step": 160 + }, + { + "epoch": 0.35767842266037214, + "grad_norm": 0.04282578080892563, + "learning_rate": 0.0029358024691358026, + "loss": 8.2482, + "step": 161 + }, + { + "epoch": 0.35990002777006386, + "grad_norm": 0.04906844720244408, + "learning_rate": 0.0029333333333333334, + "loss": 8.4081, + "step": 162 + }, + { + "epoch": 0.36212163287975563, + "grad_norm": 0.05085057392716408, + "learning_rate": 0.002930864197530864, + "loss": 8.1858, + "step": 163 + }, + { + "epoch": 0.3643432379894474, + "grad_norm": 0.042417705059051514, 
+ "learning_rate": 0.002928395061728395, + "loss": 8.3901, + "step": 164 + }, + { + "epoch": 0.3665648430991391, + "grad_norm": 0.041248828172683716, + "learning_rate": 0.002925925925925926, + "loss": 8.1118, + "step": 165 + }, + { + "epoch": 0.3687864482088309, + "grad_norm": 0.04611312597990036, + "learning_rate": 0.002923456790123457, + "loss": 8.3726, + "step": 166 + }, + { + "epoch": 0.37100805331852266, + "grad_norm": 0.04500536993145943, + "learning_rate": 0.0029209876543209875, + "loss": 8.3331, + "step": 167 + }, + { + "epoch": 0.3732296584282144, + "grad_norm": 0.049214959144592285, + "learning_rate": 0.0029185185185185186, + "loss": 8.1056, + "step": 168 + }, + { + "epoch": 0.37545126353790614, + "grad_norm": 0.042415715754032135, + "learning_rate": 0.0029160493827160494, + "loss": 8.3253, + "step": 169 + }, + { + "epoch": 0.3776728686475979, + "grad_norm": 0.04630446061491966, + "learning_rate": 0.0029135802469135805, + "loss": 8.3159, + "step": 170 + }, + { + "epoch": 0.37989447375728963, + "grad_norm": 0.04075557366013527, + "learning_rate": 0.002911111111111111, + "loss": 8.1875, + "step": 171 + }, + { + "epoch": 0.3821160788669814, + "grad_norm": 0.04644331336021423, + "learning_rate": 0.002908641975308642, + "loss": 8.4214, + "step": 172 + }, + { + "epoch": 0.3843376839766732, + "grad_norm": 0.04412577301263809, + "learning_rate": 0.002906172839506173, + "loss": 8.072, + "step": 173 + }, + { + "epoch": 0.3865592890863649, + "grad_norm": 0.04354352876543999, + "learning_rate": 0.002903703703703704, + "loss": 8.0577, + "step": 174 + }, + { + "epoch": 0.38878089419605666, + "grad_norm": 0.04611154645681381, + "learning_rate": 0.0029012345679012346, + "loss": 8.3131, + "step": 175 + }, + { + "epoch": 0.39100249930574843, + "grad_norm": 0.040660127997398376, + "learning_rate": 0.0028987654320987654, + "loss": 8.2307, + "step": 176 + }, + { + "epoch": 0.39322410441544015, + "grad_norm": 0.04646790400147438, + "learning_rate": 0.0028962962962962966, + "loss": 8.2717, + "step": 177 + }, + { + "epoch": 0.3954457095251319, + "grad_norm": 0.04254138097167015, + "learning_rate": 0.0028938271604938273, + "loss": 8.0337, + "step": 178 + }, + { + "epoch": 0.3976673146348237, + "grad_norm": 0.03916044905781746, + "learning_rate": 0.002891358024691358, + "loss": 7.8892, + "step": 179 + }, + { + "epoch": 0.3998889197445154, + "grad_norm": 0.0402020625770092, + "learning_rate": 0.0028888888888888888, + "loss": 8.059, + "step": 180 + }, + { + "epoch": 0.4021105248542072, + "grad_norm": 0.04283468797802925, + "learning_rate": 0.00288641975308642, + "loss": 8.1629, + "step": 181 + }, + { + "epoch": 0.4043321299638989, + "grad_norm": 0.03644024208188057, + "learning_rate": 0.0028839506172839507, + "loss": 7.9251, + "step": 182 + }, + { + "epoch": 0.40655373507359066, + "grad_norm": 0.03857982158660889, + "learning_rate": 0.0028814814814814814, + "loss": 7.957, + "step": 183 + }, + { + "epoch": 0.40877534018328243, + "grad_norm": 0.03826620802283287, + "learning_rate": 0.0028790123456790126, + "loss": 8.0753, + "step": 184 + }, + { + "epoch": 0.41099694529297415, + "grad_norm": 0.03651922941207886, + "learning_rate": 0.0028765432098765433, + "loss": 7.9539, + "step": 185 + }, + { + "epoch": 0.4132185504026659, + "grad_norm": 0.04460339993238449, + "learning_rate": 0.002874074074074074, + "loss": 7.6859, + "step": 186 + }, + { + "epoch": 0.4154401555123577, + "grad_norm": 0.03517116606235504, + "learning_rate": 0.0028716049382716048, + "loss": 7.9726, + "step": 187 + }, + { + "epoch": 
0.4176617606220494, + "grad_norm": 0.036879803985357285, + "learning_rate": 0.002869135802469136, + "loss": 8.0059, + "step": 188 + }, + { + "epoch": 0.4198833657317412, + "grad_norm": 0.036895960569381714, + "learning_rate": 0.0028666666666666667, + "loss": 7.7974, + "step": 189 + }, + { + "epoch": 0.42210497084143295, + "grad_norm": 0.03512522578239441, + "learning_rate": 0.0028641975308641974, + "loss": 7.9832, + "step": 190 + }, + { + "epoch": 0.42432657595112466, + "grad_norm": 0.043111640959978104, + "learning_rate": 0.0028617283950617286, + "loss": 7.7488, + "step": 191 + }, + { + "epoch": 0.42654818106081643, + "grad_norm": 0.03965785354375839, + "learning_rate": 0.0028592592592592593, + "loss": 7.6362, + "step": 192 + }, + { + "epoch": 0.4287697861705082, + "grad_norm": 0.04848213866353035, + "learning_rate": 0.00285679012345679, + "loss": 7.5347, + "step": 193 + }, + { + "epoch": 0.4309913912801999, + "grad_norm": 0.0401473306119442, + "learning_rate": 0.0028543209876543208, + "loss": 7.6057, + "step": 194 + }, + { + "epoch": 0.4332129963898917, + "grad_norm": 0.03666435182094574, + "learning_rate": 0.002851851851851852, + "loss": 7.585, + "step": 195 + }, + { + "epoch": 0.43543460149958346, + "grad_norm": 0.033364903181791306, + "learning_rate": 0.002849382716049383, + "loss": 7.9137, + "step": 196 + }, + { + "epoch": 0.4376562066092752, + "grad_norm": 0.036309197545051575, + "learning_rate": 0.0028469135802469134, + "loss": 7.5144, + "step": 197 + }, + { + "epoch": 0.43987781171896695, + "grad_norm": 0.03403438627719879, + "learning_rate": 0.0028444444444444446, + "loss": 7.884, + "step": 198 + }, + { + "epoch": 0.4420994168286587, + "grad_norm": 0.033892132341861725, + "learning_rate": 0.0028419753086419753, + "loss": 7.9242, + "step": 199 + }, + { + "epoch": 0.44432102193835044, + "grad_norm": 0.034154847264289856, + "learning_rate": 0.0028395061728395065, + "loss": 7.817, + "step": 200 + }, + { + "epoch": 0.4465426270480422, + "grad_norm": 0.03998449072241783, + "learning_rate": 0.002837037037037037, + "loss": 7.677, + "step": 201 + }, + { + "epoch": 0.448764232157734, + "grad_norm": 0.038014624267816544, + "learning_rate": 0.002834567901234568, + "loss": 7.6986, + "step": 202 + }, + { + "epoch": 0.4509858372674257, + "grad_norm": 0.03981682285666466, + "learning_rate": 0.002832098765432099, + "loss": 7.5149, + "step": 203 + }, + { + "epoch": 0.45320744237711746, + "grad_norm": 0.03799785301089287, + "learning_rate": 0.00282962962962963, + "loss": 7.3572, + "step": 204 + }, + { + "epoch": 0.45542904748680924, + "grad_norm": 0.04427637159824371, + "learning_rate": 0.0028271604938271606, + "loss": 7.1368, + "step": 205 + }, + { + "epoch": 0.45765065259650095, + "grad_norm": 0.03394748643040657, + "learning_rate": 0.0028246913580246913, + "loss": 7.5763, + "step": 206 + }, + { + "epoch": 0.4598722577061927, + "grad_norm": 0.03583474084734917, + "learning_rate": 0.0028222222222222225, + "loss": 7.7005, + "step": 207 + }, + { + "epoch": 0.4620938628158845, + "grad_norm": 0.033851176500320435, + "learning_rate": 0.0028197530864197532, + "loss": 7.7524, + "step": 208 + }, + { + "epoch": 0.4643154679255762, + "grad_norm": 0.03364001214504242, + "learning_rate": 0.002817283950617284, + "loss": 7.6403, + "step": 209 + }, + { + "epoch": 0.466537073035268, + "grad_norm": 0.039782341569662094, + "learning_rate": 0.0028148148148148147, + "loss": 7.4501, + "step": 210 + }, + { + "epoch": 0.46875867814495975, + "grad_norm": 0.033960845321416855, + "learning_rate": 0.002812345679012346, + 
"loss": 7.5254, + "step": 211 + }, + { + "epoch": 0.47098028325465147, + "grad_norm": 0.035350967198610306, + "learning_rate": 0.0028098765432098766, + "loss": 7.4594, + "step": 212 + }, + { + "epoch": 0.47320188836434324, + "grad_norm": 0.030056284740567207, + "learning_rate": 0.0028074074074074073, + "loss": 7.5952, + "step": 213 + }, + { + "epoch": 0.475423493474035, + "grad_norm": 0.03404721990227699, + "learning_rate": 0.0028049382716049385, + "loss": 7.26, + "step": 214 + }, + { + "epoch": 0.4776450985837267, + "grad_norm": 0.03368282690644264, + "learning_rate": 0.0028024691358024692, + "loss": 7.5807, + "step": 215 + }, + { + "epoch": 0.4798667036934185, + "grad_norm": 0.03246147185564041, + "learning_rate": 0.0028, + "loss": 7.4564, + "step": 216 + }, + { + "epoch": 0.48208830880311027, + "grad_norm": 0.03420104831457138, + "learning_rate": 0.0027975308641975307, + "loss": 7.413, + "step": 217 + }, + { + "epoch": 0.484309913912802, + "grad_norm": 0.03454587236046791, + "learning_rate": 0.002795061728395062, + "loss": 7.5475, + "step": 218 + }, + { + "epoch": 0.48653151902249375, + "grad_norm": 0.030751582235097885, + "learning_rate": 0.0027925925925925926, + "loss": 7.4298, + "step": 219 + }, + { + "epoch": 0.4887531241321855, + "grad_norm": 0.03355126827955246, + "learning_rate": 0.0027901234567901233, + "loss": 7.633, + "step": 220 + }, + { + "epoch": 0.49097472924187724, + "grad_norm": 0.03314507007598877, + "learning_rate": 0.0027876543209876545, + "loss": 7.5276, + "step": 221 + }, + { + "epoch": 0.493196334351569, + "grad_norm": 0.03196662664413452, + "learning_rate": 0.0027851851851851852, + "loss": 7.8203, + "step": 222 + }, + { + "epoch": 0.4954179394612608, + "grad_norm": 0.03462420031428337, + "learning_rate": 0.002782716049382716, + "loss": 7.2141, + "step": 223 + }, + { + "epoch": 0.4976395445709525, + "grad_norm": 0.027754031121730804, + "learning_rate": 0.0027802469135802467, + "loss": 7.2748, + "step": 224 + }, + { + "epoch": 0.49986114968064427, + "grad_norm": 0.040720872581005096, + "learning_rate": 0.002777777777777778, + "loss": 7.1818, + "step": 225 + }, + { + "epoch": 0.502082754790336, + "grad_norm": 0.04294149950146675, + "learning_rate": 0.002775308641975309, + "loss": 7.3151, + "step": 226 + }, + { + "epoch": 0.5043043599000278, + "grad_norm": 0.03208593651652336, + "learning_rate": 0.0027728395061728394, + "loss": 7.3898, + "step": 227 + }, + { + "epoch": 0.5065259650097195, + "grad_norm": 0.03241240605711937, + "learning_rate": 0.0027703703703703705, + "loss": 7.5746, + "step": 228 + }, + { + "epoch": 0.5087475701194113, + "grad_norm": 0.026224076747894287, + "learning_rate": 0.0027679012345679013, + "loss": 7.5724, + "step": 229 + }, + { + "epoch": 0.510969175229103, + "grad_norm": 0.02820778824388981, + "learning_rate": 0.0027654320987654324, + "loss": 7.3347, + "step": 230 + }, + { + "epoch": 0.5131907803387947, + "grad_norm": 0.030725786462426186, + "learning_rate": 0.0027629629629629627, + "loss": 7.0236, + "step": 231 + }, + { + "epoch": 0.5154123854484866, + "grad_norm": 0.03701042756438255, + "learning_rate": 0.002760493827160494, + "loss": 7.4099, + "step": 232 + }, + { + "epoch": 0.5176339905581783, + "grad_norm": 0.028012625873088837, + "learning_rate": 0.0027580246913580246, + "loss": 7.3943, + "step": 233 + }, + { + "epoch": 0.51985559566787, + "grad_norm": 0.03428445756435394, + "learning_rate": 0.002755555555555556, + "loss": 7.4596, + "step": 234 + }, + { + "epoch": 0.5220772007775618, + "grad_norm": 0.03811949864029884, + 
"learning_rate": 0.0027530864197530865, + "loss": 7.3027, + "step": 235 + }, + { + "epoch": 0.5242988058872535, + "grad_norm": 0.031404219567775726, + "learning_rate": 0.0027506172839506173, + "loss": 7.5975, + "step": 236 + }, + { + "epoch": 0.5265204109969452, + "grad_norm": 0.029649903997778893, + "learning_rate": 0.0027481481481481484, + "loss": 7.3965, + "step": 237 + }, + { + "epoch": 0.5287420161066371, + "grad_norm": 0.037028517574071884, + "learning_rate": 0.002745679012345679, + "loss": 7.1045, + "step": 238 + }, + { + "epoch": 0.5309636212163288, + "grad_norm": 0.03681131452322006, + "learning_rate": 0.00274320987654321, + "loss": 7.0011, + "step": 239 + }, + { + "epoch": 0.5331852263260205, + "grad_norm": 0.02837192639708519, + "learning_rate": 0.0027407407407407406, + "loss": 7.3327, + "step": 240 + }, + { + "epoch": 0.5354068314357123, + "grad_norm": 0.03215103596448898, + "learning_rate": 0.002738271604938272, + "loss": 7.2435, + "step": 241 + }, + { + "epoch": 0.537628436545404, + "grad_norm": 0.027541397139430046, + "learning_rate": 0.0027358024691358025, + "loss": 7.257, + "step": 242 + }, + { + "epoch": 0.5398500416550958, + "grad_norm": 0.02753252163529396, + "learning_rate": 0.0027333333333333333, + "loss": 7.257, + "step": 243 + }, + { + "epoch": 0.5420716467647876, + "grad_norm": 0.03283260762691498, + "learning_rate": 0.0027308641975308644, + "loss": 7.0467, + "step": 244 + }, + { + "epoch": 0.5442932518744793, + "grad_norm": 0.030674073845148087, + "learning_rate": 0.002728395061728395, + "loss": 7.157, + "step": 245 + }, + { + "epoch": 0.546514856984171, + "grad_norm": 0.030835680663585663, + "learning_rate": 0.002725925925925926, + "loss": 7.2868, + "step": 246 + }, + { + "epoch": 0.5487364620938628, + "grad_norm": 0.03128199651837349, + "learning_rate": 0.0027234567901234566, + "loss": 7.3337, + "step": 247 + }, + { + "epoch": 0.5509580672035546, + "grad_norm": 0.033430133014917374, + "learning_rate": 0.002720987654320988, + "loss": 7.6031, + "step": 248 + }, + { + "epoch": 0.5531796723132463, + "grad_norm": 0.0288421381264925, + "learning_rate": 0.0027185185185185185, + "loss": 6.896, + "step": 249 + }, + { + "epoch": 0.5554012774229381, + "grad_norm": 0.029844773933291435, + "learning_rate": 0.0027160493827160493, + "loss": 6.7998, + "step": 250 + }, + { + "epoch": 0.5576228825326298, + "grad_norm": 0.02905687503516674, + "learning_rate": 0.0027135802469135805, + "loss": 6.8452, + "step": 251 + }, + { + "epoch": 0.5598444876423215, + "grad_norm": 0.028612490743398666, + "learning_rate": 0.002711111111111111, + "loss": 7.3031, + "step": 252 + }, + { + "epoch": 0.5620660927520134, + "grad_norm": 0.03148072585463524, + "learning_rate": 0.002708641975308642, + "loss": 6.9144, + "step": 253 + }, + { + "epoch": 0.5642876978617051, + "grad_norm": 0.02918795496225357, + "learning_rate": 0.0027061728395061727, + "loss": 7.1563, + "step": 254 + }, + { + "epoch": 0.5665093029713968, + "grad_norm": 0.025865094736218452, + "learning_rate": 0.002703703703703704, + "loss": 7.1238, + "step": 255 + }, + { + "epoch": 0.5687309080810886, + "grad_norm": 0.03287065774202347, + "learning_rate": 0.002701234567901235, + "loss": 7.3028, + "step": 256 + }, + { + "epoch": 0.5709525131907803, + "grad_norm": 0.027668677270412445, + "learning_rate": 0.0026987654320987653, + "loss": 7.5821, + "step": 257 + }, + { + "epoch": 0.573174118300472, + "grad_norm": 0.031455788761377335, + "learning_rate": 0.0026962962962962965, + "loss": 7.4093, + "step": 258 + }, + { + "epoch": 0.5753957234101639, 
+ "grad_norm": 0.031509652733802795, + "learning_rate": 0.002693827160493827, + "loss": 7.4072, + "step": 259 + }, + { + "epoch": 0.5776173285198556, + "grad_norm": 0.025732247158885002, + "learning_rate": 0.0026913580246913584, + "loss": 7.1182, + "step": 260 + }, + { + "epoch": 0.5798389336295473, + "grad_norm": 0.030423138290643692, + "learning_rate": 0.0026888888888888887, + "loss": 7.0401, + "step": 261 + }, + { + "epoch": 0.5820605387392391, + "grad_norm": 0.03525110334157944, + "learning_rate": 0.00268641975308642, + "loss": 7.2664, + "step": 262 + }, + { + "epoch": 0.5842821438489308, + "grad_norm": 0.02785809151828289, + "learning_rate": 0.0026839506172839506, + "loss": 7.1201, + "step": 263 + }, + { + "epoch": 0.5865037489586226, + "grad_norm": 0.031698115170001984, + "learning_rate": 0.0026814814814814817, + "loss": 6.9975, + "step": 264 + }, + { + "epoch": 0.5887253540683144, + "grad_norm": 0.031081918627023697, + "learning_rate": 0.0026790123456790125, + "loss": 6.9444, + "step": 265 + }, + { + "epoch": 0.5909469591780061, + "grad_norm": 0.0324559286236763, + "learning_rate": 0.002676543209876543, + "loss": 7.1328, + "step": 266 + }, + { + "epoch": 0.5931685642876978, + "grad_norm": 0.03233259543776512, + "learning_rate": 0.0026740740740740744, + "loss": 6.9415, + "step": 267 + }, + { + "epoch": 0.5953901693973896, + "grad_norm": 0.04220357909798622, + "learning_rate": 0.0026716049382716047, + "loss": 6.8164, + "step": 268 + }, + { + "epoch": 0.5976117745070814, + "grad_norm": 0.034913282841444016, + "learning_rate": 0.002669135802469136, + "loss": 7.2838, + "step": 269 + }, + { + "epoch": 0.5998333796167731, + "grad_norm": 0.02642977610230446, + "learning_rate": 0.0026666666666666666, + "loss": 7.3645, + "step": 270 + }, + { + "epoch": 0.6020549847264649, + "grad_norm": 0.030156614258885384, + "learning_rate": 0.0026641975308641977, + "loss": 7.0999, + "step": 271 + }, + { + "epoch": 0.6042765898361566, + "grad_norm": 0.03368644416332245, + "learning_rate": 0.0026617283950617285, + "loss": 7.2405, + "step": 272 + }, + { + "epoch": 0.6064981949458483, + "grad_norm": 0.03504369780421257, + "learning_rate": 0.002659259259259259, + "loss": 7.0479, + "step": 273 + }, + { + "epoch": 0.6087198000555402, + "grad_norm": 0.029628051444888115, + "learning_rate": 0.0026567901234567904, + "loss": 6.7508, + "step": 274 + }, + { + "epoch": 0.6109414051652319, + "grad_norm": 0.029622158035635948, + "learning_rate": 0.002654320987654321, + "loss": 7.2015, + "step": 275 + }, + { + "epoch": 0.6131630102749236, + "grad_norm": 0.032323166728019714, + "learning_rate": 0.002651851851851852, + "loss": 6.9056, + "step": 276 + }, + { + "epoch": 0.6153846153846154, + "grad_norm": 0.026018457487225533, + "learning_rate": 0.0026493827160493826, + "loss": 7.2168, + "step": 277 + }, + { + "epoch": 0.6176062204943071, + "grad_norm": 0.030117856338620186, + "learning_rate": 0.0026469135802469138, + "loss": 7.0721, + "step": 278 + }, + { + "epoch": 0.6198278256039988, + "grad_norm": 0.030448278412222862, + "learning_rate": 0.0026444444444444445, + "loss": 6.9572, + "step": 279 + }, + { + "epoch": 0.6220494307136907, + "grad_norm": 0.028298810124397278, + "learning_rate": 0.0026419753086419752, + "loss": 7.2107, + "step": 280 + }, + { + "epoch": 0.6242710358233824, + "grad_norm": 0.027341045439243317, + "learning_rate": 0.0026395061728395064, + "loss": 7.1902, + "step": 281 + }, + { + "epoch": 0.6264926409330741, + "grad_norm": 0.03337961807847023, + "learning_rate": 0.002637037037037037, + "loss": 7.1904, + 
"step": 282 + }, + { + "epoch": 0.6287142460427659, + "grad_norm": 0.030440054833889008, + "learning_rate": 0.002634567901234568, + "loss": 7.4463, + "step": 283 + }, + { + "epoch": 0.6309358511524576, + "grad_norm": 0.03126044571399689, + "learning_rate": 0.0026320987654320986, + "loss": 7.4159, + "step": 284 + }, + { + "epoch": 0.6331574562621494, + "grad_norm": 0.031162705272436142, + "learning_rate": 0.0026296296296296298, + "loss": 7.1282, + "step": 285 + }, + { + "epoch": 0.6353790613718412, + "grad_norm": 0.026622280478477478, + "learning_rate": 0.0026271604938271605, + "loss": 7.1215, + "step": 286 + }, + { + "epoch": 0.6376006664815329, + "grad_norm": 0.03224776312708855, + "learning_rate": 0.0026246913580246912, + "loss": 7.0815, + "step": 287 + }, + { + "epoch": 0.6398222715912246, + "grad_norm": 0.030211608856916428, + "learning_rate": 0.0026222222222222224, + "loss": 7.1783, + "step": 288 + }, + { + "epoch": 0.6420438767009164, + "grad_norm": 0.036949314177036285, + "learning_rate": 0.002619753086419753, + "loss": 7.2292, + "step": 289 + }, + { + "epoch": 0.6442654818106082, + "grad_norm": 0.030840424820780754, + "learning_rate": 0.002617283950617284, + "loss": 7.2447, + "step": 290 + }, + { + "epoch": 0.6464870869202999, + "grad_norm": 0.03116264007985592, + "learning_rate": 0.0026148148148148146, + "loss": 6.9522, + "step": 291 + }, + { + "epoch": 0.6487086920299917, + "grad_norm": 0.03676169738173485, + "learning_rate": 0.0026123456790123458, + "loss": 6.8615, + "step": 292 + }, + { + "epoch": 0.6509302971396834, + "grad_norm": 0.03387770429253578, + "learning_rate": 0.0026098765432098765, + "loss": 7.0317, + "step": 293 + }, + { + "epoch": 0.6531519022493751, + "grad_norm": 0.03344965726137161, + "learning_rate": 0.0026074074074074072, + "loss": 6.9111, + "step": 294 + }, + { + "epoch": 0.655373507359067, + "grad_norm": 0.03378414735198021, + "learning_rate": 0.0026049382716049384, + "loss": 6.8334, + "step": 295 + }, + { + "epoch": 0.6575951124687587, + "grad_norm": 0.028000645339488983, + "learning_rate": 0.002602469135802469, + "loss": 7.2455, + "step": 296 + }, + { + "epoch": 0.6598167175784504, + "grad_norm": 0.029027551412582397, + "learning_rate": 0.0026000000000000003, + "loss": 6.7113, + "step": 297 + }, + { + "epoch": 0.6620383226881422, + "grad_norm": 0.030199337750673294, + "learning_rate": 0.0025975308641975306, + "loss": 6.9931, + "step": 298 + }, + { + "epoch": 0.6642599277978339, + "grad_norm": 0.027782822027802467, + "learning_rate": 0.002595061728395062, + "loss": 7.0203, + "step": 299 + }, + { + "epoch": 0.6664815329075257, + "grad_norm": 0.027012750506401062, + "learning_rate": 0.0025925925925925925, + "loss": 7.0655, + "step": 300 + }, + { + "epoch": 0.6687031380172175, + "grad_norm": 0.026889488101005554, + "learning_rate": 0.0025901234567901237, + "loss": 7.0643, + "step": 301 + }, + { + "epoch": 0.6709247431269092, + "grad_norm": 0.03029460459947586, + "learning_rate": 0.0025876543209876544, + "loss": 7.0144, + "step": 302 + }, + { + "epoch": 0.6731463482366009, + "grad_norm": 0.026146529242396355, + "learning_rate": 0.002585185185185185, + "loss": 7.2646, + "step": 303 + }, + { + "epoch": 0.6753679533462927, + "grad_norm": 0.03537617623806, + "learning_rate": 0.0025827160493827163, + "loss": 6.8476, + "step": 304 + }, + { + "epoch": 0.6775895584559845, + "grad_norm": 0.03081704117357731, + "learning_rate": 0.002580246913580247, + "loss": 7.1906, + "step": 305 + }, + { + "epoch": 0.6798111635656762, + "grad_norm": 0.033128634095191956, + 
"learning_rate": 0.002577777777777778, + "loss": 7.0696, + "step": 306 + }, + { + "epoch": 0.682032768675368, + "grad_norm": 0.03375165909528732, + "learning_rate": 0.0025753086419753085, + "loss": 6.6613, + "step": 307 + }, + { + "epoch": 0.6842543737850597, + "grad_norm": 0.029131779447197914, + "learning_rate": 0.0025728395061728397, + "loss": 7.2572, + "step": 308 + }, + { + "epoch": 0.6864759788947514, + "grad_norm": 0.026389723643660545, + "learning_rate": 0.0025703703703703704, + "loss": 7.0176, + "step": 309 + }, + { + "epoch": 0.6886975840044433, + "grad_norm": 0.03170786425471306, + "learning_rate": 0.002567901234567901, + "loss": 6.8322, + "step": 310 + }, + { + "epoch": 0.690919189114135, + "grad_norm": 0.02895709127187729, + "learning_rate": 0.0025654320987654323, + "loss": 7.0894, + "step": 311 + }, + { + "epoch": 0.6931407942238267, + "grad_norm": 0.025471147149801254, + "learning_rate": 0.002562962962962963, + "loss": 7.0324, + "step": 312 + }, + { + "epoch": 0.6953623993335185, + "grad_norm": 0.029405398294329643, + "learning_rate": 0.002560493827160494, + "loss": 6.8744, + "step": 313 + }, + { + "epoch": 0.6975840044432102, + "grad_norm": 0.02752537652850151, + "learning_rate": 0.0025580246913580245, + "loss": 6.8055, + "step": 314 + }, + { + "epoch": 0.6998056095529019, + "grad_norm": 0.024177823215723038, + "learning_rate": 0.0025555555555555557, + "loss": 7.3241, + "step": 315 + }, + { + "epoch": 0.7020272146625938, + "grad_norm": 0.02782405912876129, + "learning_rate": 0.0025530864197530864, + "loss": 7.0388, + "step": 316 + }, + { + "epoch": 0.7042488197722855, + "grad_norm": 0.026573065668344498, + "learning_rate": 0.002550617283950617, + "loss": 7.0159, + "step": 317 + }, + { + "epoch": 0.7064704248819772, + "grad_norm": 0.03358136862516403, + "learning_rate": 0.0025481481481481483, + "loss": 7.1215, + "step": 318 + }, + { + "epoch": 0.708692029991669, + "grad_norm": 0.03143709897994995, + "learning_rate": 0.002545679012345679, + "loss": 6.6386, + "step": 319 + }, + { + "epoch": 0.7109136351013607, + "grad_norm": 0.033959418535232544, + "learning_rate": 0.00254320987654321, + "loss": 6.8983, + "step": 320 + }, + { + "epoch": 0.7131352402110525, + "grad_norm": 0.024646824225783348, + "learning_rate": 0.0025407407407407405, + "loss": 6.8726, + "step": 321 + }, + { + "epoch": 0.7153568453207443, + "grad_norm": 0.02922394871711731, + "learning_rate": 0.0025382716049382717, + "loss": 6.9099, + "step": 322 + }, + { + "epoch": 0.717578450430436, + "grad_norm": 0.030439792200922966, + "learning_rate": 0.0025358024691358024, + "loss": 6.9323, + "step": 323 + }, + { + "epoch": 0.7198000555401277, + "grad_norm": 0.04333483427762985, + "learning_rate": 0.002533333333333333, + "loss": 6.9007, + "step": 324 + }, + { + "epoch": 0.7220216606498195, + "grad_norm": 0.03616077080368996, + "learning_rate": 0.0025308641975308644, + "loss": 6.8624, + "step": 325 + }, + { + "epoch": 0.7242432657595113, + "grad_norm": 0.033735282719135284, + "learning_rate": 0.002528395061728395, + "loss": 6.8608, + "step": 326 + }, + { + "epoch": 0.726464870869203, + "grad_norm": 0.03077850490808487, + "learning_rate": 0.0025259259259259263, + "loss": 7.1149, + "step": 327 + }, + { + "epoch": 0.7286864759788948, + "grad_norm": 0.03490167856216431, + "learning_rate": 0.0025234567901234566, + "loss": 6.9609, + "step": 328 + }, + { + "epoch": 0.7309080810885865, + "grad_norm": 0.029763774946331978, + "learning_rate": 0.0025209876543209877, + "loss": 6.9148, + "step": 329 + }, + { + "epoch": 
0.7331296861982782, + "grad_norm": 0.029299907386302948, + "learning_rate": 0.0025185185185185185, + "loss": 6.9816, + "step": 330 + }, + { + "epoch": 0.7353512913079701, + "grad_norm": 0.02755190059542656, + "learning_rate": 0.0025160493827160496, + "loss": 6.724, + "step": 331 + }, + { + "epoch": 0.7375728964176618, + "grad_norm": 0.02832108922302723, + "learning_rate": 0.0025135802469135804, + "loss": 6.8974, + "step": 332 + }, + { + "epoch": 0.7397945015273535, + "grad_norm": 0.02688843198120594, + "learning_rate": 0.002511111111111111, + "loss": 7.1274, + "step": 333 + }, + { + "epoch": 0.7420161066370453, + "grad_norm": 0.030441652983427048, + "learning_rate": 0.0025086419753086423, + "loss": 6.9542, + "step": 334 + }, + { + "epoch": 0.744237711746737, + "grad_norm": 0.0377890020608902, + "learning_rate": 0.002506172839506173, + "loss": 7.0826, + "step": 335 + }, + { + "epoch": 0.7464593168564287, + "grad_norm": 0.03879869356751442, + "learning_rate": 0.0025037037037037037, + "loss": 7.051, + "step": 336 + }, + { + "epoch": 0.7486809219661206, + "grad_norm": 0.022517995908856392, + "learning_rate": 0.0025012345679012345, + "loss": 7.0527, + "step": 337 + }, + { + "epoch": 0.7509025270758123, + "grad_norm": 0.02492484450340271, + "learning_rate": 0.0024987654320987656, + "loss": 6.867, + "step": 338 + }, + { + "epoch": 0.753124132185504, + "grad_norm": 0.0263162013143301, + "learning_rate": 0.0024962962962962964, + "loss": 6.9205, + "step": 339 + }, + { + "epoch": 0.7553457372951958, + "grad_norm": 0.03145473077893257, + "learning_rate": 0.002493827160493827, + "loss": 7.1822, + "step": 340 + }, + { + "epoch": 0.7575673424048875, + "grad_norm": 0.02679683268070221, + "learning_rate": 0.0024913580246913583, + "loss": 7.0945, + "step": 341 + }, + { + "epoch": 0.7597889475145793, + "grad_norm": 0.03611517325043678, + "learning_rate": 0.002488888888888889, + "loss": 6.5617, + "step": 342 + }, + { + "epoch": 0.7620105526242711, + "grad_norm": 0.028081588447093964, + "learning_rate": 0.0024864197530864197, + "loss": 7.0982, + "step": 343 + }, + { + "epoch": 0.7642321577339628, + "grad_norm": 0.03137503191828728, + "learning_rate": 0.0024839506172839505, + "loss": 6.6402, + "step": 344 + }, + { + "epoch": 0.7664537628436545, + "grad_norm": 0.03392868861556053, + "learning_rate": 0.0024814814814814816, + "loss": 7.1155, + "step": 345 + }, + { + "epoch": 0.7686753679533463, + "grad_norm": 0.030533727258443832, + "learning_rate": 0.0024790123456790124, + "loss": 7.0098, + "step": 346 + }, + { + "epoch": 0.7708969730630381, + "grad_norm": 0.03094147890806198, + "learning_rate": 0.002476543209876543, + "loss": 6.7761, + "step": 347 + }, + { + "epoch": 0.7731185781727298, + "grad_norm": 0.025714484974741936, + "learning_rate": 0.0024740740740740743, + "loss": 6.8854, + "step": 348 + }, + { + "epoch": 0.7753401832824216, + "grad_norm": 0.031463462859392166, + "learning_rate": 0.002471604938271605, + "loss": 6.8518, + "step": 349 + }, + { + "epoch": 0.7775617883921133, + "grad_norm": 0.025870738551020622, + "learning_rate": 0.0024691358024691358, + "loss": 6.9806, + "step": 350 + }, + { + "epoch": 0.779783393501805, + "grad_norm": 0.029403753578662872, + "learning_rate": 0.0024666666666666665, + "loss": 6.801, + "step": 351 + }, + { + "epoch": 0.7820049986114969, + "grad_norm": 0.029973311349749565, + "learning_rate": 0.0024641975308641977, + "loss": 7.143, + "step": 352 + }, + { + "epoch": 0.7842266037211886, + "grad_norm": 0.02538173645734787, + "learning_rate": 0.0024617283950617284, + "loss": 
6.7702, + "step": 353 + }, + { + "epoch": 0.7864482088308803, + "grad_norm": 0.02790713682770729, + "learning_rate": 0.002459259259259259, + "loss": 6.6072, + "step": 354 + }, + { + "epoch": 0.7886698139405721, + "grad_norm": 0.028936631977558136, + "learning_rate": 0.0024567901234567903, + "loss": 6.8102, + "step": 355 + }, + { + "epoch": 0.7908914190502638, + "grad_norm": 0.029574889689683914, + "learning_rate": 0.002454320987654321, + "loss": 7.088, + "step": 356 + }, + { + "epoch": 0.7931130241599555, + "grad_norm": 0.03510890156030655, + "learning_rate": 0.002451851851851852, + "loss": 6.6048, + "step": 357 + }, + { + "epoch": 0.7953346292696474, + "grad_norm": 0.03128746896982193, + "learning_rate": 0.0024493827160493825, + "loss": 6.9001, + "step": 358 + }, + { + "epoch": 0.7975562343793391, + "grad_norm": 0.030788661912083626, + "learning_rate": 0.0024469135802469137, + "loss": 6.8839, + "step": 359 + }, + { + "epoch": 0.7997778394890308, + "grad_norm": 0.028769545257091522, + "learning_rate": 0.0024444444444444444, + "loss": 6.9937, + "step": 360 + }, + { + "epoch": 0.8019994445987225, + "grad_norm": 0.028774607926607132, + "learning_rate": 0.0024419753086419756, + "loss": 6.877, + "step": 361 + }, + { + "epoch": 0.8042210497084143, + "grad_norm": 0.03091941773891449, + "learning_rate": 0.0024395061728395063, + "loss": 6.8068, + "step": 362 + }, + { + "epoch": 0.8064426548181061, + "grad_norm": 0.03130682557821274, + "learning_rate": 0.002437037037037037, + "loss": 6.851, + "step": 363 + }, + { + "epoch": 0.8086642599277978, + "grad_norm": 0.026035597547888756, + "learning_rate": 0.002434567901234568, + "loss": 6.8659, + "step": 364 + }, + { + "epoch": 0.8108858650374896, + "grad_norm": 0.027542999014258385, + "learning_rate": 0.002432098765432099, + "loss": 7.1436, + "step": 365 + }, + { + "epoch": 0.8131074701471813, + "grad_norm": 0.030278557911515236, + "learning_rate": 0.0024296296296296297, + "loss": 7.2488, + "step": 366 + }, + { + "epoch": 0.815329075256873, + "grad_norm": 0.031210409477353096, + "learning_rate": 0.0024271604938271604, + "loss": 6.7226, + "step": 367 + }, + { + "epoch": 0.8175506803665649, + "grad_norm": 0.03228698670864105, + "learning_rate": 0.0024246913580246916, + "loss": 7.0787, + "step": 368 + }, + { + "epoch": 0.8197722854762566, + "grad_norm": 0.034072816371917725, + "learning_rate": 0.0024222222222222223, + "loss": 6.7884, + "step": 369 + }, + { + "epoch": 0.8219938905859483, + "grad_norm": 0.02897859923541546, + "learning_rate": 0.002419753086419753, + "loss": 6.685, + "step": 370 + }, + { + "epoch": 0.8242154956956401, + "grad_norm": 0.03470846638083458, + "learning_rate": 0.002417283950617284, + "loss": 6.8452, + "step": 371 + }, + { + "epoch": 0.8264371008053318, + "grad_norm": 0.039191946387290955, + "learning_rate": 0.002414814814814815, + "loss": 6.8654, + "step": 372 + }, + { + "epoch": 0.8286587059150236, + "grad_norm": 0.027019087225198746, + "learning_rate": 0.0024123456790123457, + "loss": 6.7975, + "step": 373 + }, + { + "epoch": 0.8308803110247154, + "grad_norm": 0.028417536988854408, + "learning_rate": 0.0024098765432098764, + "loss": 7.179, + "step": 374 + }, + { + "epoch": 0.8331019161344071, + "grad_norm": 0.031571824103593826, + "learning_rate": 0.0024074074074074076, + "loss": 6.6972, + "step": 375 + }, + { + "epoch": 0.8353235212440988, + "grad_norm": 0.025722427293658257, + "learning_rate": 0.0024049382716049383, + "loss": 6.7311, + "step": 376 + }, + { + "epoch": 0.8375451263537906, + "grad_norm": 0.03283727914094925, + 
"learning_rate": 0.002402469135802469, + "loss": 6.7464, + "step": 377 + }, + { + "epoch": 0.8397667314634824, + "grad_norm": 0.025677897036075592, + "learning_rate": 0.0024000000000000002, + "loss": 6.8963, + "step": 378 + }, + { + "epoch": 0.8419883365731741, + "grad_norm": 0.047866322100162506, + "learning_rate": 0.002397530864197531, + "loss": 6.7552, + "step": 379 + }, + { + "epoch": 0.8442099416828659, + "grad_norm": 0.025421174243092537, + "learning_rate": 0.0023950617283950617, + "loss": 6.8062, + "step": 380 + }, + { + "epoch": 0.8464315467925576, + "grad_norm": 0.03164597600698471, + "learning_rate": 0.0023925925925925924, + "loss": 6.7044, + "step": 381 + }, + { + "epoch": 0.8486531519022493, + "grad_norm": 0.03499810770153999, + "learning_rate": 0.0023901234567901236, + "loss": 6.8009, + "step": 382 + }, + { + "epoch": 0.8508747570119412, + "grad_norm": 0.03318408131599426, + "learning_rate": 0.0023876543209876543, + "loss": 6.861, + "step": 383 + }, + { + "epoch": 0.8530963621216329, + "grad_norm": 0.030897561460733414, + "learning_rate": 0.002385185185185185, + "loss": 6.7319, + "step": 384 + }, + { + "epoch": 0.8553179672313246, + "grad_norm": 0.035121310502290726, + "learning_rate": 0.0023827160493827162, + "loss": 6.9468, + "step": 385 + }, + { + "epoch": 0.8575395723410164, + "grad_norm": 0.02977081760764122, + "learning_rate": 0.002380246913580247, + "loss": 6.6821, + "step": 386 + }, + { + "epoch": 0.8597611774507081, + "grad_norm": 0.027311140671372414, + "learning_rate": 0.002377777777777778, + "loss": 6.8621, + "step": 387 + }, + { + "epoch": 0.8619827825603998, + "grad_norm": 0.027461953461170197, + "learning_rate": 0.0023753086419753084, + "loss": 7.0028, + "step": 388 + }, + { + "epoch": 0.8642043876700917, + "grad_norm": 0.02862822264432907, + "learning_rate": 0.0023728395061728396, + "loss": 6.726, + "step": 389 + }, + { + "epoch": 0.8664259927797834, + "grad_norm": 0.02961575612425804, + "learning_rate": 0.0023703703703703703, + "loss": 6.6011, + "step": 390 + }, + { + "epoch": 0.8686475978894751, + "grad_norm": 0.02995389699935913, + "learning_rate": 0.0023679012345679015, + "loss": 6.5965, + "step": 391 + }, + { + "epoch": 0.8708692029991669, + "grad_norm": 0.024569949135184288, + "learning_rate": 0.0023654320987654322, + "loss": 6.6053, + "step": 392 + }, + { + "epoch": 0.8730908081088586, + "grad_norm": 0.04112711548805237, + "learning_rate": 0.002362962962962963, + "loss": 6.7283, + "step": 393 + }, + { + "epoch": 0.8753124132185504, + "grad_norm": 0.03940947726368904, + "learning_rate": 0.002360493827160494, + "loss": 7.1214, + "step": 394 + }, + { + "epoch": 0.8775340183282422, + "grad_norm": 0.03772582858800888, + "learning_rate": 0.002358024691358025, + "loss": 6.7342, + "step": 395 + }, + { + "epoch": 0.8797556234379339, + "grad_norm": 0.026038886979222298, + "learning_rate": 0.0023555555555555556, + "loss": 6.7722, + "step": 396 + }, + { + "epoch": 0.8819772285476256, + "grad_norm": 0.02900727652013302, + "learning_rate": 0.0023530864197530863, + "loss": 6.6497, + "step": 397 + }, + { + "epoch": 0.8841988336573174, + "grad_norm": 0.03674020990729332, + "learning_rate": 0.0023506172839506175, + "loss": 6.4395, + "step": 398 + }, + { + "epoch": 0.8864204387670092, + "grad_norm": 0.030998943373560905, + "learning_rate": 0.002348148148148148, + "loss": 6.6632, + "step": 399 + }, + { + "epoch": 0.8886420438767009, + "grad_norm": 0.03351856395602226, + "learning_rate": 0.002345679012345679, + "loss": 6.6518, + "step": 400 + }, + { + "epoch": 
0.8908636489863927, + "grad_norm": 0.030786389485001564, + "learning_rate": 0.00234320987654321, + "loss": 6.7591, + "step": 401 + }, + { + "epoch": 0.8930852540960844, + "grad_norm": 0.03690814599394798, + "learning_rate": 0.002340740740740741, + "loss": 6.5874, + "step": 402 + }, + { + "epoch": 0.8953068592057761, + "grad_norm": 0.030751686543226242, + "learning_rate": 0.0023382716049382716, + "loss": 6.6858, + "step": 403 + }, + { + "epoch": 0.897528464315468, + "grad_norm": 0.03505620360374451, + "learning_rate": 0.0023358024691358024, + "loss": 6.6377, + "step": 404 + }, + { + "epoch": 0.8997500694251597, + "grad_norm": 0.034183319658041, + "learning_rate": 0.0023333333333333335, + "loss": 6.6526, + "step": 405 + }, + { + "epoch": 0.9019716745348514, + "grad_norm": 0.03079032339155674, + "learning_rate": 0.0023308641975308643, + "loss": 6.7385, + "step": 406 + }, + { + "epoch": 0.9041932796445432, + "grad_norm": 0.03357497230172157, + "learning_rate": 0.002328395061728395, + "loss": 6.9186, + "step": 407 + }, + { + "epoch": 0.9064148847542349, + "grad_norm": 0.030285147950053215, + "learning_rate": 0.002325925925925926, + "loss": 6.8228, + "step": 408 + }, + { + "epoch": 0.9086364898639266, + "grad_norm": 0.03262084349989891, + "learning_rate": 0.002323456790123457, + "loss": 6.6384, + "step": 409 + }, + { + "epoch": 0.9108580949736185, + "grad_norm": 0.03636816889047623, + "learning_rate": 0.0023209876543209876, + "loss": 7.0531, + "step": 410 + }, + { + "epoch": 0.9130797000833102, + "grad_norm": 0.04130619764328003, + "learning_rate": 0.0023185185185185184, + "loss": 6.5445, + "step": 411 + }, + { + "epoch": 0.9153013051930019, + "grad_norm": 0.03653937578201294, + "learning_rate": 0.0023160493827160495, + "loss": 6.7057, + "step": 412 + }, + { + "epoch": 0.9175229103026937, + "grad_norm": 0.032455723732709885, + "learning_rate": 0.0023135802469135803, + "loss": 6.5941, + "step": 413 + }, + { + "epoch": 0.9197445154123854, + "grad_norm": 0.03412288427352905, + "learning_rate": 0.002311111111111111, + "loss": 6.9319, + "step": 414 + }, + { + "epoch": 0.9219661205220772, + "grad_norm": 0.035218141973018646, + "learning_rate": 0.002308641975308642, + "loss": 6.3276, + "step": 415 + }, + { + "epoch": 0.924187725631769, + "grad_norm": 0.03169059380888939, + "learning_rate": 0.002306172839506173, + "loss": 6.7758, + "step": 416 + }, + { + "epoch": 0.9264093307414607, + "grad_norm": 0.024801066145300865, + "learning_rate": 0.0023037037037037036, + "loss": 6.7239, + "step": 417 + }, + { + "epoch": 0.9286309358511524, + "grad_norm": 0.02555897831916809, + "learning_rate": 0.0023012345679012344, + "loss": 6.6137, + "step": 418 + }, + { + "epoch": 0.9308525409608442, + "grad_norm": 0.031776994466781616, + "learning_rate": 0.0022987654320987655, + "loss": 6.7518, + "step": 419 + }, + { + "epoch": 0.933074146070536, + "grad_norm": 0.03202354535460472, + "learning_rate": 0.0022962962962962963, + "loss": 6.9085, + "step": 420 + }, + { + "epoch": 0.9352957511802277, + "grad_norm": 0.029564661905169487, + "learning_rate": 0.002293827160493827, + "loss": 6.7439, + "step": 421 + }, + { + "epoch": 0.9375173562899195, + "grad_norm": 0.02673780918121338, + "learning_rate": 0.002291358024691358, + "loss": 6.8067, + "step": 422 + }, + { + "epoch": 0.9397389613996112, + "grad_norm": 0.031321026384830475, + "learning_rate": 0.002288888888888889, + "loss": 6.5093, + "step": 423 + }, + { + "epoch": 0.9419605665093029, + "grad_norm": 0.027808304876089096, + "learning_rate": 0.00228641975308642, + "loss": 
6.4626, + "step": 424 + }, + { + "epoch": 0.9441821716189948, + "grad_norm": 0.025776829570531845, + "learning_rate": 0.0022839506172839504, + "loss": 6.7953, + "step": 425 + }, + { + "epoch": 0.9464037767286865, + "grad_norm": 0.03336632251739502, + "learning_rate": 0.0022814814814814816, + "loss": 6.5457, + "step": 426 + }, + { + "epoch": 0.9486253818383782, + "grad_norm": 0.028170697391033173, + "learning_rate": 0.0022790123456790123, + "loss": 6.785, + "step": 427 + }, + { + "epoch": 0.95084698694807, + "grad_norm": 0.03778989240527153, + "learning_rate": 0.0022765432098765435, + "loss": 6.6185, + "step": 428 + }, + { + "epoch": 0.9530685920577617, + "grad_norm": 0.03384809568524361, + "learning_rate": 0.0022740740740740738, + "loss": 6.6729, + "step": 429 + }, + { + "epoch": 0.9552901971674534, + "grad_norm": 0.03491503372788429, + "learning_rate": 0.002271604938271605, + "loss": 6.506, + "step": 430 + }, + { + "epoch": 0.9575118022771453, + "grad_norm": 0.028737282380461693, + "learning_rate": 0.002269135802469136, + "loss": 6.5785, + "step": 431 + }, + { + "epoch": 0.959733407386837, + "grad_norm": 0.029536008834838867, + "learning_rate": 0.002266666666666667, + "loss": 6.5539, + "step": 432 + }, + { + "epoch": 0.9619550124965287, + "grad_norm": 0.029247025027871132, + "learning_rate": 0.0022641975308641976, + "loss": 6.4651, + "step": 433 + }, + { + "epoch": 0.9641766176062205, + "grad_norm": 0.037673089653253555, + "learning_rate": 0.0022617283950617283, + "loss": 6.3081, + "step": 434 + }, + { + "epoch": 0.9663982227159122, + "grad_norm": 0.031512826681137085, + "learning_rate": 0.0022592592592592595, + "loss": 6.8234, + "step": 435 + }, + { + "epoch": 0.968619827825604, + "grad_norm": 0.03206060081720352, + "learning_rate": 0.00225679012345679, + "loss": 6.5044, + "step": 436 + }, + { + "epoch": 0.9708414329352958, + "grad_norm": 0.02888185903429985, + "learning_rate": 0.002254320987654321, + "loss": 6.301, + "step": 437 + }, + { + "epoch": 0.9730630380449875, + "grad_norm": 0.03852174058556557, + "learning_rate": 0.002251851851851852, + "loss": 6.614, + "step": 438 + }, + { + "epoch": 0.9752846431546792, + "grad_norm": 0.02977623976767063, + "learning_rate": 0.002249382716049383, + "loss": 6.8427, + "step": 439 + }, + { + "epoch": 0.977506248264371, + "grad_norm": 0.036224476993083954, + "learning_rate": 0.0022469135802469136, + "loss": 6.6819, + "step": 440 + }, + { + "epoch": 0.9797278533740628, + "grad_norm": 0.028337834402918816, + "learning_rate": 0.0022444444444444443, + "loss": 6.9447, + "step": 441 + }, + { + "epoch": 0.9819494584837545, + "grad_norm": 0.032608017325401306, + "learning_rate": 0.0022419753086419755, + "loss": 6.521, + "step": 442 + }, + { + "epoch": 0.9841710635934463, + "grad_norm": 0.030470674857497215, + "learning_rate": 0.002239506172839506, + "loss": 6.3275, + "step": 443 + }, + { + "epoch": 0.986392668703138, + "grad_norm": 0.045779451727867126, + "learning_rate": 0.002237037037037037, + "loss": 6.4221, + "step": 444 + }, + { + "epoch": 0.9886142738128297, + "grad_norm": 0.03529239073395729, + "learning_rate": 0.002234567901234568, + "loss": 6.1334, + "step": 445 + }, + { + "epoch": 0.9908358789225216, + "grad_norm": 0.04795719310641289, + "learning_rate": 0.002232098765432099, + "loss": 6.6265, + "step": 446 + }, + { + "epoch": 0.9930574840322133, + "grad_norm": 0.0322149284183979, + "learning_rate": 0.0022296296296296296, + "loss": 6.3979, + "step": 447 + }, + { + "epoch": 0.995279089141905, + "grad_norm": 0.030884549021720886, + "learning_rate": 
0.0022271604938271603, + "loss": 6.5888, + "step": 448 + }, + { + "epoch": 0.9975006942515968, + "grad_norm": 0.03471051901578903, + "learning_rate": 0.0022246913580246915, + "loss": 6.4639, + "step": 449 + }, + { + "epoch": 0.9997222993612885, + "grad_norm": 0.029371246695518494, + "learning_rate": 0.0022222222222222222, + "loss": 6.6529, + "step": 450 + }, + { + "epoch": 1.0019439044709804, + "grad_norm": 0.02985074557363987, + "learning_rate": 0.002219753086419753, + "loss": 6.3826, + "step": 451 + }, + { + "epoch": 1.004165509580672, + "grad_norm": 0.03519700467586517, + "learning_rate": 0.002217283950617284, + "loss": 6.4751, + "step": 452 + }, + { + "epoch": 1.0063871146903638, + "grad_norm": 0.037781670689582825, + "learning_rate": 0.002214814814814815, + "loss": 6.4896, + "step": 453 + }, + { + "epoch": 1.0086087198000555, + "grad_norm": 0.027791369706392288, + "learning_rate": 0.002212345679012346, + "loss": 6.6676, + "step": 454 + }, + { + "epoch": 1.0108303249097472, + "grad_norm": 0.0314226932823658, + "learning_rate": 0.0022098765432098763, + "loss": 6.4966, + "step": 455 + }, + { + "epoch": 1.013051930019439, + "grad_norm": 0.03207942843437195, + "learning_rate": 0.0022074074074074075, + "loss": 6.7773, + "step": 456 + }, + { + "epoch": 1.0152735351291309, + "grad_norm": 0.03154020011425018, + "learning_rate": 0.0022049382716049382, + "loss": 6.5173, + "step": 457 + }, + { + "epoch": 1.0174951402388226, + "grad_norm": 0.029535001143813133, + "learning_rate": 0.0022024691358024694, + "loss": 6.6344, + "step": 458 + }, + { + "epoch": 1.0197167453485143, + "grad_norm": 0.0366988480091095, + "learning_rate": 0.0021999999999999997, + "loss": 5.9945, + "step": 459 + }, + { + "epoch": 1.021938350458206, + "grad_norm": 0.03053160198032856, + "learning_rate": 0.002197530864197531, + "loss": 6.4279, + "step": 460 + }, + { + "epoch": 1.0241599555678977, + "grad_norm": 0.02893592044711113, + "learning_rate": 0.002195061728395062, + "loss": 6.5733, + "step": 461 + }, + { + "epoch": 1.0263815606775895, + "grad_norm": 0.031111309304833412, + "learning_rate": 0.0021925925925925928, + "loss": 6.6382, + "step": 462 + }, + { + "epoch": 1.0286031657872814, + "grad_norm": 0.03582914173603058, + "learning_rate": 0.0021901234567901235, + "loss": 6.3495, + "step": 463 + }, + { + "epoch": 1.030824770896973, + "grad_norm": 0.029999269172549248, + "learning_rate": 0.0021876543209876542, + "loss": 6.8815, + "step": 464 + }, + { + "epoch": 1.0330463760066648, + "grad_norm": 0.03019753284752369, + "learning_rate": 0.0021851851851851854, + "loss": 6.7338, + "step": 465 + }, + { + "epoch": 1.0352679811163565, + "grad_norm": 0.03386778384447098, + "learning_rate": 0.002182716049382716, + "loss": 6.3456, + "step": 466 + }, + { + "epoch": 1.0374895862260483, + "grad_norm": 0.033411286771297455, + "learning_rate": 0.002180246913580247, + "loss": 6.6192, + "step": 467 + }, + { + "epoch": 1.03971119133574, + "grad_norm": 0.03734864294528961, + "learning_rate": 0.002177777777777778, + "loss": 6.5337, + "step": 468 + }, + { + "epoch": 1.041932796445432, + "grad_norm": 0.034468065947294235, + "learning_rate": 0.0021753086419753088, + "loss": 6.4662, + "step": 469 + }, + { + "epoch": 1.0441544015551236, + "grad_norm": 0.029232153668999672, + "learning_rate": 0.0021728395061728395, + "loss": 6.603, + "step": 470 + }, + { + "epoch": 1.0463760066648153, + "grad_norm": 0.035031069070100784, + "learning_rate": 0.0021703703703703702, + "loss": 6.4214, + "step": 471 + }, + { + "epoch": 1.048597611774507, + "grad_norm": 
0.05113089084625244, + "learning_rate": 0.0021679012345679014, + "loss": 6.6573, + "step": 472 + }, + { + "epoch": 1.0508192168841988, + "grad_norm": 0.03398042917251587, + "learning_rate": 0.002165432098765432, + "loss": 6.4512, + "step": 473 + }, + { + "epoch": 1.0530408219938905, + "grad_norm": 0.032400310039520264, + "learning_rate": 0.002162962962962963, + "loss": 6.6219, + "step": 474 + }, + { + "epoch": 1.0552624271035824, + "grad_norm": 0.0310080386698246, + "learning_rate": 0.002160493827160494, + "loss": 6.7245, + "step": 475 + }, + { + "epoch": 1.0574840322132741, + "grad_norm": 0.02772977575659752, + "learning_rate": 0.002158024691358025, + "loss": 6.3142, + "step": 476 + }, + { + "epoch": 1.0597056373229659, + "grad_norm": 0.029645215719938278, + "learning_rate": 0.0021555555555555555, + "loss": 6.3477, + "step": 477 + }, + { + "epoch": 1.0619272424326576, + "grad_norm": 0.03764863312244415, + "learning_rate": 0.0021530864197530863, + "loss": 6.0773, + "step": 478 + }, + { + "epoch": 1.0641488475423493, + "grad_norm": 0.034320443868637085, + "learning_rate": 0.0021506172839506174, + "loss": 6.0489, + "step": 479 + }, + { + "epoch": 1.066370452652041, + "grad_norm": 0.03357730433344841, + "learning_rate": 0.002148148148148148, + "loss": 6.6008, + "step": 480 + }, + { + "epoch": 1.068592057761733, + "grad_norm": 0.030703729018568993, + "learning_rate": 0.002145679012345679, + "loss": 6.3549, + "step": 481 + }, + { + "epoch": 1.0708136628714247, + "grad_norm": 0.029614439234137535, + "learning_rate": 0.0021432098765432096, + "loss": 6.4135, + "step": 482 + }, + { + "epoch": 1.0730352679811164, + "grad_norm": 0.030537104234099388, + "learning_rate": 0.002140740740740741, + "loss": 6.3719, + "step": 483 + }, + { + "epoch": 1.075256873090808, + "grad_norm": 0.028199033811688423, + "learning_rate": 0.002138271604938272, + "loss": 6.6597, + "step": 484 + }, + { + "epoch": 1.0774784782004998, + "grad_norm": 0.03533554822206497, + "learning_rate": 0.0021358024691358023, + "loss": 6.654, + "step": 485 + }, + { + "epoch": 1.0797000833101915, + "grad_norm": 0.03641669452190399, + "learning_rate": 0.0021333333333333334, + "loss": 6.4385, + "step": 486 + }, + { + "epoch": 1.0819216884198835, + "grad_norm": 0.03398216515779495, + "learning_rate": 0.002130864197530864, + "loss": 6.6636, + "step": 487 + }, + { + "epoch": 1.0841432935295752, + "grad_norm": 0.041564710438251495, + "learning_rate": 0.0021283950617283953, + "loss": 6.8422, + "step": 488 + }, + { + "epoch": 1.0863648986392669, + "grad_norm": 0.038841165602207184, + "learning_rate": 0.0021259259259259256, + "loss": 6.3757, + "step": 489 + }, + { + "epoch": 1.0885865037489586, + "grad_norm": 0.033147819340229034, + "learning_rate": 0.002123456790123457, + "loss": 6.5191, + "step": 490 + }, + { + "epoch": 1.0908081088586503, + "grad_norm": 0.02892182767391205, + "learning_rate": 0.002120987654320988, + "loss": 6.4549, + "step": 491 + }, + { + "epoch": 1.093029713968342, + "grad_norm": 0.029515832662582397, + "learning_rate": 0.0021185185185185187, + "loss": 6.2039, + "step": 492 + }, + { + "epoch": 1.095251319078034, + "grad_norm": 0.03112645447254181, + "learning_rate": 0.0021160493827160494, + "loss": 6.5373, + "step": 493 + }, + { + "epoch": 1.0974729241877257, + "grad_norm": 0.03828863427042961, + "learning_rate": 0.00211358024691358, + "loss": 6.6497, + "step": 494 + }, + { + "epoch": 1.0996945292974174, + "grad_norm": 0.033057838678359985, + "learning_rate": 0.0021111111111111113, + "loss": 6.3881, + "step": 495 + }, + { + 
"epoch": 1.1019161344071091, + "grad_norm": 0.027790391817688942, + "learning_rate": 0.002108641975308642, + "loss": 6.4662, + "step": 496 + }, + { + "epoch": 1.1041377395168008, + "grad_norm": 0.03252388536930084, + "learning_rate": 0.002106172839506173, + "loss": 6.5765, + "step": 497 + }, + { + "epoch": 1.1063593446264925, + "grad_norm": 0.03879280760884285, + "learning_rate": 0.002103703703703704, + "loss": 6.2594, + "step": 498 + }, + { + "epoch": 1.1085809497361845, + "grad_norm": 0.04156181216239929, + "learning_rate": 0.0021012345679012347, + "loss": 7.0039, + "step": 499 + }, + { + "epoch": 1.1108025548458762, + "grad_norm": 0.03543870523571968, + "learning_rate": 0.0020987654320987655, + "loss": 6.3444, + "step": 500 + }, + { + "epoch": 1.113024159955568, + "grad_norm": 0.05665072053670883, + "learning_rate": 0.002096296296296296, + "loss": 6.3474, + "step": 501 + }, + { + "epoch": 1.1152457650652596, + "grad_norm": 0.0283832885324955, + "learning_rate": 0.0020938271604938274, + "loss": 6.5432, + "step": 502 + }, + { + "epoch": 1.1174673701749513, + "grad_norm": 0.03598196804523468, + "learning_rate": 0.002091358024691358, + "loss": 6.3492, + "step": 503 + }, + { + "epoch": 1.119688975284643, + "grad_norm": 0.03885941207408905, + "learning_rate": 0.002088888888888889, + "loss": 6.5366, + "step": 504 + }, + { + "epoch": 1.121910580394335, + "grad_norm": 0.0317206010222435, + "learning_rate": 0.00208641975308642, + "loss": 6.4539, + "step": 505 + }, + { + "epoch": 1.1241321855040267, + "grad_norm": 0.040018461644649506, + "learning_rate": 0.0020839506172839507, + "loss": 6.2432, + "step": 506 + }, + { + "epoch": 1.1263537906137184, + "grad_norm": 0.04189518094062805, + "learning_rate": 0.0020814814814814815, + "loss": 6.5315, + "step": 507 + }, + { + "epoch": 1.1285753957234101, + "grad_norm": 0.02969651110470295, + "learning_rate": 0.002079012345679012, + "loss": 6.2752, + "step": 508 + }, + { + "epoch": 1.1307970008331019, + "grad_norm": 0.037640877068042755, + "learning_rate": 0.0020765432098765434, + "loss": 6.667, + "step": 509 + }, + { + "epoch": 1.1330186059427936, + "grad_norm": 0.035663068294525146, + "learning_rate": 0.002074074074074074, + "loss": 6.5661, + "step": 510 + }, + { + "epoch": 1.1352402110524855, + "grad_norm": 0.04258381202816963, + "learning_rate": 0.002071604938271605, + "loss": 6.4601, + "step": 511 + }, + { + "epoch": 1.1374618161621772, + "grad_norm": 0.0322837308049202, + "learning_rate": 0.0020691358024691356, + "loss": 6.2217, + "step": 512 + }, + { + "epoch": 1.139683421271869, + "grad_norm": 0.0373774953186512, + "learning_rate": 0.0020666666666666667, + "loss": 6.1486, + "step": 513 + }, + { + "epoch": 1.1419050263815607, + "grad_norm": 0.03975836560130119, + "learning_rate": 0.002064197530864198, + "loss": 6.3465, + "step": 514 + }, + { + "epoch": 1.1441266314912524, + "grad_norm": 0.029985908418893814, + "learning_rate": 0.002061728395061728, + "loss": 6.6245, + "step": 515 + }, + { + "epoch": 1.146348236600944, + "grad_norm": 0.03610779717564583, + "learning_rate": 0.0020592592592592594, + "loss": 6.0818, + "step": 516 + }, + { + "epoch": 1.148569841710636, + "grad_norm": 0.03605785220861435, + "learning_rate": 0.00205679012345679, + "loss": 6.6951, + "step": 517 + }, + { + "epoch": 1.1507914468203277, + "grad_norm": 0.03263959288597107, + "learning_rate": 0.0020543209876543213, + "loss": 6.3446, + "step": 518 + }, + { + "epoch": 1.1530130519300195, + "grad_norm": 0.0429079495370388, + "learning_rate": 0.0020518518518518516, + "loss": 6.4425, + 
"step": 519 + }, + { + "epoch": 1.1552346570397112, + "grad_norm": 0.04839729890227318, + "learning_rate": 0.0020493827160493827, + "loss": 6.3836, + "step": 520 + }, + { + "epoch": 1.157456262149403, + "grad_norm": 0.03985445573925972, + "learning_rate": 0.002046913580246914, + "loss": 6.3422, + "step": 521 + }, + { + "epoch": 1.1596778672590946, + "grad_norm": 0.03695117309689522, + "learning_rate": 0.0020444444444444447, + "loss": 6.5901, + "step": 522 + }, + { + "epoch": 1.1618994723687863, + "grad_norm": 0.03463996574282646, + "learning_rate": 0.0020419753086419754, + "loss": 6.4557, + "step": 523 + }, + { + "epoch": 1.1641210774784783, + "grad_norm": 0.037514619529247284, + "learning_rate": 0.002039506172839506, + "loss": 6.0191, + "step": 524 + }, + { + "epoch": 1.16634268258817, + "grad_norm": 0.03486974164843559, + "learning_rate": 0.0020370370370370373, + "loss": 6.487, + "step": 525 + }, + { + "epoch": 1.1685642876978617, + "grad_norm": 0.05132891237735748, + "learning_rate": 0.002034567901234568, + "loss": 6.6569, + "step": 526 + }, + { + "epoch": 1.1707858928075534, + "grad_norm": 0.040708813816308975, + "learning_rate": 0.0020320987654320988, + "loss": 6.056, + "step": 527 + }, + { + "epoch": 1.1730074979172451, + "grad_norm": 0.044833164662122726, + "learning_rate": 0.00202962962962963, + "loss": 6.388, + "step": 528 + }, + { + "epoch": 1.175229103026937, + "grad_norm": 0.027963142842054367, + "learning_rate": 0.0020271604938271607, + "loss": 6.5797, + "step": 529 + }, + { + "epoch": 1.1774507081366288, + "grad_norm": 0.0402199923992157, + "learning_rate": 0.0020246913580246914, + "loss": 6.7003, + "step": 530 + }, + { + "epoch": 1.1796723132463205, + "grad_norm": 0.04106440767645836, + "learning_rate": 0.002022222222222222, + "loss": 6.3391, + "step": 531 + }, + { + "epoch": 1.1818939183560122, + "grad_norm": 0.03796447440981865, + "learning_rate": 0.0020197530864197533, + "loss": 6.4775, + "step": 532 + }, + { + "epoch": 1.184115523465704, + "grad_norm": 0.0440424308180809, + "learning_rate": 0.002017283950617284, + "loss": 6.4393, + "step": 533 + }, + { + "epoch": 1.1863371285753956, + "grad_norm": 0.041621048003435135, + "learning_rate": 0.0020148148148148148, + "loss": 6.1435, + "step": 534 + }, + { + "epoch": 1.1885587336850874, + "grad_norm": 0.0351857990026474, + "learning_rate": 0.002012345679012346, + "loss": 6.2345, + "step": 535 + }, + { + "epoch": 1.1907803387947793, + "grad_norm": 0.04070345312356949, + "learning_rate": 0.0020098765432098767, + "loss": 6.0336, + "step": 536 + }, + { + "epoch": 1.193001943904471, + "grad_norm": 0.03114120475947857, + "learning_rate": 0.0020074074074074074, + "loss": 6.4161, + "step": 537 + }, + { + "epoch": 1.1952235490141627, + "grad_norm": 0.04014582931995392, + "learning_rate": 0.002004938271604938, + "loss": 6.3247, + "step": 538 + }, + { + "epoch": 1.1974451541238544, + "grad_norm": 0.03971127048134804, + "learning_rate": 0.0020024691358024693, + "loss": 6.4709, + "step": 539 + }, + { + "epoch": 1.1996667592335462, + "grad_norm": 0.03852924704551697, + "learning_rate": 0.002, + "loss": 6.3638, + "step": 540 + }, + { + "epoch": 1.201888364343238, + "grad_norm": 0.03519389033317566, + "learning_rate": 0.0019975308641975308, + "loss": 6.6442, + "step": 541 + }, + { + "epoch": 1.2041099694529298, + "grad_norm": 0.04014746472239494, + "learning_rate": 0.0019950617283950615, + "loss": 6.3269, + "step": 542 + }, + { + "epoch": 1.2063315745626215, + "grad_norm": 0.03208151459693909, + "learning_rate": 0.0019925925925925927, + 
"loss": 6.2944, + "step": 543 + }, + { + "epoch": 1.2085531796723132, + "grad_norm": 0.03822821378707886, + "learning_rate": 0.001990123456790124, + "loss": 6.6407, + "step": 544 + }, + { + "epoch": 1.210774784782005, + "grad_norm": 0.046587634831666946, + "learning_rate": 0.001987654320987654, + "loss": 6.593, + "step": 545 + }, + { + "epoch": 1.2129963898916967, + "grad_norm": 0.042928680777549744, + "learning_rate": 0.0019851851851851853, + "loss": 6.3402, + "step": 546 + }, + { + "epoch": 1.2152179950013884, + "grad_norm": 0.03751085326075554, + "learning_rate": 0.001982716049382716, + "loss": 6.3294, + "step": 547 + }, + { + "epoch": 1.2174396001110803, + "grad_norm": 0.03686273843050003, + "learning_rate": 0.001980246913580247, + "loss": 6.4659, + "step": 548 + }, + { + "epoch": 1.219661205220772, + "grad_norm": 0.03327074274420738, + "learning_rate": 0.0019777777777777775, + "loss": 6.2969, + "step": 549 + }, + { + "epoch": 1.2218828103304638, + "grad_norm": 0.032279789447784424, + "learning_rate": 0.0019753086419753087, + "loss": 6.1469, + "step": 550 + }, + { + "epoch": 1.2241044154401555, + "grad_norm": 0.051666878163814545, + "learning_rate": 0.00197283950617284, + "loss": 6.2377, + "step": 551 + }, + { + "epoch": 1.2263260205498472, + "grad_norm": 0.0387309193611145, + "learning_rate": 0.00197037037037037, + "loss": 6.0148, + "step": 552 + }, + { + "epoch": 1.2285476256595391, + "grad_norm": 0.04013807326555252, + "learning_rate": 0.0019679012345679013, + "loss": 6.3446, + "step": 553 + }, + { + "epoch": 1.2307692307692308, + "grad_norm": 0.03821302205324173, + "learning_rate": 0.001965432098765432, + "loss": 6.666, + "step": 554 + }, + { + "epoch": 1.2329908358789226, + "grad_norm": 0.03850986063480377, + "learning_rate": 0.0019629629629629632, + "loss": 6.3285, + "step": 555 + }, + { + "epoch": 1.2352124409886143, + "grad_norm": 0.036970287561416626, + "learning_rate": 0.0019604938271604935, + "loss": 6.5845, + "step": 556 + }, + { + "epoch": 1.237434046098306, + "grad_norm": 0.03782539442181587, + "learning_rate": 0.0019580246913580247, + "loss": 6.3414, + "step": 557 + }, + { + "epoch": 1.2396556512079977, + "grad_norm": 0.038746099919080734, + "learning_rate": 0.001955555555555556, + "loss": 6.5084, + "step": 558 + }, + { + "epoch": 1.2418772563176894, + "grad_norm": 0.03104456327855587, + "learning_rate": 0.0019530864197530864, + "loss": 6.1361, + "step": 559 + }, + { + "epoch": 1.2440988614273814, + "grad_norm": 0.039266884326934814, + "learning_rate": 0.0019506172839506173, + "loss": 6.5168, + "step": 560 + }, + { + "epoch": 1.246320466537073, + "grad_norm": 0.04455557093024254, + "learning_rate": 0.001948148148148148, + "loss": 6.4887, + "step": 561 + }, + { + "epoch": 1.2485420716467648, + "grad_norm": 0.03722786903381348, + "learning_rate": 0.001945679012345679, + "loss": 6.3669, + "step": 562 + }, + { + "epoch": 1.2507636767564565, + "grad_norm": 0.06070278212428093, + "learning_rate": 0.0019432098765432098, + "loss": 6.1768, + "step": 563 + }, + { + "epoch": 1.2529852818661482, + "grad_norm": 0.03736662119626999, + "learning_rate": 0.0019407407407407407, + "loss": 6.2368, + "step": 564 + }, + { + "epoch": 1.2552068869758402, + "grad_norm": 0.04633970558643341, + "learning_rate": 0.0019382716049382714, + "loss": 6.4326, + "step": 565 + }, + { + "epoch": 1.2574284920855319, + "grad_norm": 0.03372138366103172, + "learning_rate": 0.0019358024691358024, + "loss": 6.2443, + "step": 566 + }, + { + "epoch": 1.2596500971952236, + "grad_norm": 0.0386214554309845, + 
"learning_rate": 0.0019333333333333336, + "loss": 6.2969, + "step": 567 + }, + { + "epoch": 1.2618717023049153, + "grad_norm": 0.05762870982289314, + "learning_rate": 0.001930864197530864, + "loss": 6.321, + "step": 568 + }, + { + "epoch": 1.264093307414607, + "grad_norm": 0.03465733677148819, + "learning_rate": 0.0019283950617283952, + "loss": 6.2286, + "step": 569 + }, + { + "epoch": 1.2663149125242987, + "grad_norm": 0.02819642797112465, + "learning_rate": 0.0019259259259259258, + "loss": 6.0266, + "step": 570 + }, + { + "epoch": 1.2685365176339904, + "grad_norm": 0.033979251980781555, + "learning_rate": 0.001923456790123457, + "loss": 6.6357, + "step": 571 + }, + { + "epoch": 1.2707581227436824, + "grad_norm": 0.033352021127939224, + "learning_rate": 0.0019209876543209875, + "loss": 6.1982, + "step": 572 + }, + { + "epoch": 1.272979727853374, + "grad_norm": 0.0427844263613224, + "learning_rate": 0.0019185185185185186, + "loss": 6.2508, + "step": 573 + }, + { + "epoch": 1.2752013329630658, + "grad_norm": 0.03688507154583931, + "learning_rate": 0.0019160493827160496, + "loss": 6.3659, + "step": 574 + }, + { + "epoch": 1.2774229380727575, + "grad_norm": 0.04512680694460869, + "learning_rate": 0.0019135802469135803, + "loss": 6.5339, + "step": 575 + }, + { + "epoch": 1.2796445431824492, + "grad_norm": 0.03090626187622547, + "learning_rate": 0.0019111111111111113, + "loss": 6.2844, + "step": 576 + }, + { + "epoch": 1.2818661482921412, + "grad_norm": 0.034483786672353745, + "learning_rate": 0.001908641975308642, + "loss": 6.3264, + "step": 577 + }, + { + "epoch": 1.284087753401833, + "grad_norm": 0.031084293499588966, + "learning_rate": 0.001906172839506173, + "loss": 6.1903, + "step": 578 + }, + { + "epoch": 1.2863093585115246, + "grad_norm": 0.033466219902038574, + "learning_rate": 0.0019037037037037037, + "loss": 6.0493, + "step": 579 + }, + { + "epoch": 1.2885309636212163, + "grad_norm": 0.053052324801683426, + "learning_rate": 0.0019012345679012346, + "loss": 6.2936, + "step": 580 + }, + { + "epoch": 1.290752568730908, + "grad_norm": 0.03073807992041111, + "learning_rate": 0.0018987654320987656, + "loss": 6.1623, + "step": 581 + }, + { + "epoch": 1.2929741738405998, + "grad_norm": 0.029447197914123535, + "learning_rate": 0.0018962962962962963, + "loss": 6.1734, + "step": 582 + }, + { + "epoch": 1.2951957789502915, + "grad_norm": 0.03545555844902992, + "learning_rate": 0.0018938271604938273, + "loss": 6.067, + "step": 583 + }, + { + "epoch": 1.2974173840599834, + "grad_norm": 0.030690601095557213, + "learning_rate": 0.001891358024691358, + "loss": 6.3609, + "step": 584 + }, + { + "epoch": 1.2996389891696751, + "grad_norm": 0.031808044761419296, + "learning_rate": 0.001888888888888889, + "loss": 6.5173, + "step": 585 + }, + { + "epoch": 1.3018605942793668, + "grad_norm": 0.05404243990778923, + "learning_rate": 0.0018864197530864197, + "loss": 6.6066, + "step": 586 + }, + { + "epoch": 1.3040821993890586, + "grad_norm": 0.03418256714940071, + "learning_rate": 0.0018839506172839506, + "loss": 6.3429, + "step": 587 + }, + { + "epoch": 1.3063038044987503, + "grad_norm": 0.0333407036960125, + "learning_rate": 0.0018814814814814816, + "loss": 6.542, + "step": 588 + }, + { + "epoch": 1.3085254096084422, + "grad_norm": 0.04213091358542442, + "learning_rate": 0.0018790123456790123, + "loss": 5.9407, + "step": 589 + }, + { + "epoch": 1.310747014718134, + "grad_norm": 0.03965051472187042, + "learning_rate": 0.0018765432098765433, + "loss": 6.323, + "step": 590 + }, + { + "epoch": 1.3129686198278256, 
+ "grad_norm": 0.041978783905506134, + "learning_rate": 0.001874074074074074, + "loss": 6.1851, + "step": 591 + }, + { + "epoch": 1.3151902249375174, + "grad_norm": 0.030312292277812958, + "learning_rate": 0.001871604938271605, + "loss": 6.5231, + "step": 592 + }, + { + "epoch": 1.317411830047209, + "grad_norm": 0.04681335762143135, + "learning_rate": 0.0018691358024691357, + "loss": 6.3259, + "step": 593 + }, + { + "epoch": 1.3196334351569008, + "grad_norm": 0.04509252309799194, + "learning_rate": 0.0018666666666666666, + "loss": 6.208, + "step": 594 + }, + { + "epoch": 1.3218550402665925, + "grad_norm": 0.03200953081250191, + "learning_rate": 0.0018641975308641974, + "loss": 6.1602, + "step": 595 + }, + { + "epoch": 1.3240766453762844, + "grad_norm": 0.032900337129831314, + "learning_rate": 0.0018617283950617283, + "loss": 6.129, + "step": 596 + }, + { + "epoch": 1.3262982504859762, + "grad_norm": 0.03652816638350487, + "learning_rate": 0.0018592592592592595, + "loss": 6.3783, + "step": 597 + }, + { + "epoch": 1.3285198555956679, + "grad_norm": 0.040998030453920364, + "learning_rate": 0.00185679012345679, + "loss": 6.869, + "step": 598 + }, + { + "epoch": 1.3307414607053596, + "grad_norm": 0.036215465515851974, + "learning_rate": 0.0018543209876543212, + "loss": 6.324, + "step": 599 + }, + { + "epoch": 1.3329630658150513, + "grad_norm": 0.04111867398023605, + "learning_rate": 0.0018518518518518517, + "loss": 6.2809, + "step": 600 + }, + { + "epoch": 1.3351846709247432, + "grad_norm": 0.041931070387363434, + "learning_rate": 0.0018493827160493829, + "loss": 6.5084, + "step": 601 + }, + { + "epoch": 1.337406276034435, + "grad_norm": 0.03920350596308708, + "learning_rate": 0.0018469135802469134, + "loss": 6.7175, + "step": 602 + }, + { + "epoch": 1.3396278811441267, + "grad_norm": 0.03189566358923912, + "learning_rate": 0.0018444444444444446, + "loss": 6.4014, + "step": 603 + }, + { + "epoch": 1.3418494862538184, + "grad_norm": 0.04909387603402138, + "learning_rate": 0.0018419753086419755, + "loss": 6.4304, + "step": 604 + }, + { + "epoch": 1.34407109136351, + "grad_norm": 0.037338417023420334, + "learning_rate": 0.0018395061728395062, + "loss": 6.5664, + "step": 605 + }, + { + "epoch": 1.3462926964732018, + "grad_norm": 0.040651820600032806, + "learning_rate": 0.0018370370370370372, + "loss": 6.4001, + "step": 606 + }, + { + "epoch": 1.3485143015828935, + "grad_norm": 0.0633411705493927, + "learning_rate": 0.001834567901234568, + "loss": 6.1285, + "step": 607 + }, + { + "epoch": 1.3507359066925855, + "grad_norm": 0.045840244740247726, + "learning_rate": 0.0018320987654320989, + "loss": 6.5343, + "step": 608 + }, + { + "epoch": 1.3529575118022772, + "grad_norm": 0.031113650649785995, + "learning_rate": 0.0018296296296296296, + "loss": 6.5995, + "step": 609 + }, + { + "epoch": 1.355179116911969, + "grad_norm": 0.03467819094657898, + "learning_rate": 0.0018271604938271606, + "loss": 6.3797, + "step": 610 + }, + { + "epoch": 1.3574007220216606, + "grad_norm": 0.05771856755018234, + "learning_rate": 0.0018246913580246915, + "loss": 6.1617, + "step": 611 + }, + { + "epoch": 1.3596223271313523, + "grad_norm": 0.0407741442322731, + "learning_rate": 0.0018222222222222223, + "loss": 6.3389, + "step": 612 + }, + { + "epoch": 1.3618439322410443, + "grad_norm": 0.04614827781915665, + "learning_rate": 0.0018197530864197532, + "loss": 6.4762, + "step": 613 + }, + { + "epoch": 1.364065537350736, + "grad_norm": 0.04630822315812111, + "learning_rate": 0.001817283950617284, + "loss": 6.1302, + "step": 614 + 
}, + { + "epoch": 1.3662871424604277, + "grad_norm": 0.04013388976454735, + "learning_rate": 0.001814814814814815, + "loss": 6.2063, + "step": 615 + }, + { + "epoch": 1.3685087475701194, + "grad_norm": 0.04549866542220116, + "learning_rate": 0.0018123456790123456, + "loss": 6.4473, + "step": 616 + }, + { + "epoch": 1.3707303526798111, + "grad_norm": 0.04472465440630913, + "learning_rate": 0.0018098765432098766, + "loss": 6.1478, + "step": 617 + }, + { + "epoch": 1.3729519577895029, + "grad_norm": 0.04715588688850403, + "learning_rate": 0.0018074074074074075, + "loss": 6.171, + "step": 618 + }, + { + "epoch": 1.3751735628991946, + "grad_norm": 0.04618202894926071, + "learning_rate": 0.0018049382716049383, + "loss": 5.8558, + "step": 619 + }, + { + "epoch": 1.3773951680088863, + "grad_norm": 0.05153551697731018, + "learning_rate": 0.0018024691358024692, + "loss": 6.2804, + "step": 620 + }, + { + "epoch": 1.3796167731185782, + "grad_norm": 0.04584895446896553, + "learning_rate": 0.0018, + "loss": 6.3909, + "step": 621 + }, + { + "epoch": 1.38183837822827, + "grad_norm": 0.03741832450032234, + "learning_rate": 0.001797530864197531, + "loss": 6.2864, + "step": 622 + }, + { + "epoch": 1.3840599833379617, + "grad_norm": 0.05302848666906357, + "learning_rate": 0.0017950617283950616, + "loss": 6.1101, + "step": 623 + }, + { + "epoch": 1.3862815884476534, + "grad_norm": 0.05580468848347664, + "learning_rate": 0.0017925925925925926, + "loss": 6.4619, + "step": 624 + }, + { + "epoch": 1.3885031935573453, + "grad_norm": 0.03729956969618797, + "learning_rate": 0.0017901234567901233, + "loss": 6.3855, + "step": 625 + }, + { + "epoch": 1.390724798667037, + "grad_norm": 0.03489207103848457, + "learning_rate": 0.0017876543209876543, + "loss": 6.1936, + "step": 626 + }, + { + "epoch": 1.3929464037767287, + "grad_norm": 0.035313766449689865, + "learning_rate": 0.0017851851851851854, + "loss": 6.009, + "step": 627 + }, + { + "epoch": 1.3951680088864205, + "grad_norm": 0.04091987758874893, + "learning_rate": 0.001782716049382716, + "loss": 6.1428, + "step": 628 + }, + { + "epoch": 1.3973896139961122, + "grad_norm": 0.03116190992295742, + "learning_rate": 0.0017802469135802471, + "loss": 6.0366, + "step": 629 + }, + { + "epoch": 1.3996112191058039, + "grad_norm": 0.03686531260609627, + "learning_rate": 0.0017777777777777776, + "loss": 6.3959, + "step": 630 + }, + { + "epoch": 1.4018328242154956, + "grad_norm": 0.03318272531032562, + "learning_rate": 0.0017753086419753088, + "loss": 6.4128, + "step": 631 + }, + { + "epoch": 1.4040544293251873, + "grad_norm": 0.041839905083179474, + "learning_rate": 0.0017728395061728393, + "loss": 6.5429, + "step": 632 + }, + { + "epoch": 1.4062760344348793, + "grad_norm": 0.030093267560005188, + "learning_rate": 0.0017703703703703705, + "loss": 6.2587, + "step": 633 + }, + { + "epoch": 1.408497639544571, + "grad_norm": 0.05865675210952759, + "learning_rate": 0.0017679012345679015, + "loss": 6.2798, + "step": 634 + }, + { + "epoch": 1.4107192446542627, + "grad_norm": 0.0343112088739872, + "learning_rate": 0.0017654320987654322, + "loss": 6.1428, + "step": 635 + }, + { + "epoch": 1.4129408497639544, + "grad_norm": 0.03351147472858429, + "learning_rate": 0.0017629629629629631, + "loss": 6.2343, + "step": 636 + }, + { + "epoch": 1.4151624548736463, + "grad_norm": 0.034447863698005676, + "learning_rate": 0.0017604938271604939, + "loss": 6.3156, + "step": 637 + }, + { + "epoch": 1.417384059983338, + "grad_norm": 0.03670838847756386, + "learning_rate": 0.0017580246913580248, + "loss": 
6.3385, + "step": 638 + }, + { + "epoch": 1.4196056650930298, + "grad_norm": 0.03930442035198212, + "learning_rate": 0.0017555555555555556, + "loss": 6.1479, + "step": 639 + }, + { + "epoch": 1.4218272702027215, + "grad_norm": 0.03430639207363129, + "learning_rate": 0.0017530864197530865, + "loss": 6.1624, + "step": 640 + }, + { + "epoch": 1.4240488753124132, + "grad_norm": 0.04005846008658409, + "learning_rate": 0.0017506172839506175, + "loss": 6.3732, + "step": 641 + }, + { + "epoch": 1.426270480422105, + "grad_norm": 0.041829563677310944, + "learning_rate": 0.0017481481481481482, + "loss": 6.283, + "step": 642 + }, + { + "epoch": 1.4284920855317966, + "grad_norm": 0.03966071456670761, + "learning_rate": 0.0017456790123456791, + "loss": 6.3854, + "step": 643 + }, + { + "epoch": 1.4307136906414883, + "grad_norm": 0.04266310855746269, + "learning_rate": 0.0017432098765432099, + "loss": 5.8232, + "step": 644 + }, + { + "epoch": 1.4329352957511803, + "grad_norm": 0.034664738923311234, + "learning_rate": 0.0017407407407407408, + "loss": 6.1072, + "step": 645 + }, + { + "epoch": 1.435156900860872, + "grad_norm": 0.03348352760076523, + "learning_rate": 0.0017382716049382716, + "loss": 6.2337, + "step": 646 + }, + { + "epoch": 1.4373785059705637, + "grad_norm": 0.04647333547472954, + "learning_rate": 0.0017358024691358025, + "loss": 6.2249, + "step": 647 + }, + { + "epoch": 1.4396001110802554, + "grad_norm": 0.03087199293076992, + "learning_rate": 0.0017333333333333333, + "loss": 5.9, + "step": 648 + }, + { + "epoch": 1.4418217161899474, + "grad_norm": 0.031058957800269127, + "learning_rate": 0.0017308641975308642, + "loss": 6.2423, + "step": 649 + }, + { + "epoch": 1.444043321299639, + "grad_norm": 0.032819394022226334, + "learning_rate": 0.0017283950617283952, + "loss": 6.3537, + "step": 650 + }, + { + "epoch": 1.4462649264093308, + "grad_norm": 0.047803789377212524, + "learning_rate": 0.0017259259259259259, + "loss": 6.1019, + "step": 651 + }, + { + "epoch": 1.4484865315190225, + "grad_norm": 0.03795769438147545, + "learning_rate": 0.0017234567901234568, + "loss": 6.037, + "step": 652 + }, + { + "epoch": 1.4507081366287142, + "grad_norm": 0.042528439313173294, + "learning_rate": 0.0017209876543209876, + "loss": 6.3609, + "step": 653 + }, + { + "epoch": 1.452929741738406, + "grad_norm": 0.04530796408653259, + "learning_rate": 0.0017185185185185185, + "loss": 6.4745, + "step": 654 + }, + { + "epoch": 1.4551513468480977, + "grad_norm": 0.03794233873486519, + "learning_rate": 0.0017160493827160493, + "loss": 6.191, + "step": 655 + }, + { + "epoch": 1.4573729519577894, + "grad_norm": 0.045053884387016296, + "learning_rate": 0.0017135802469135802, + "loss": 6.085, + "step": 656 + }, + { + "epoch": 1.4595945570674813, + "grad_norm": 0.05708610638976097, + "learning_rate": 0.0017111111111111114, + "loss": 6.0017, + "step": 657 + }, + { + "epoch": 1.461816162177173, + "grad_norm": 0.03785408288240433, + "learning_rate": 0.001708641975308642, + "loss": 6.3146, + "step": 658 + }, + { + "epoch": 1.4640377672868647, + "grad_norm": 0.05379173904657364, + "learning_rate": 0.001706172839506173, + "loss": 5.7694, + "step": 659 + }, + { + "epoch": 1.4662593723965565, + "grad_norm": 0.043331608176231384, + "learning_rate": 0.0017037037037037036, + "loss": 6.3276, + "step": 660 + }, + { + "epoch": 1.4684809775062484, + "grad_norm": 0.03599626198410988, + "learning_rate": 0.0017012345679012348, + "loss": 6.3605, + "step": 661 + }, + { + "epoch": 1.4707025826159401, + "grad_norm": 0.0370354950428009, + 
"learning_rate": 0.0016987654320987653, + "loss": 6.2993, + "step": 662 + }, + { + "epoch": 1.4729241877256318, + "grad_norm": 0.03861134499311447, + "learning_rate": 0.0016962962962962964, + "loss": 6.4733, + "step": 663 + }, + { + "epoch": 1.4751457928353235, + "grad_norm": 0.03696829080581665, + "learning_rate": 0.0016938271604938274, + "loss": 6.3908, + "step": 664 + }, + { + "epoch": 1.4773673979450153, + "grad_norm": 0.03327971696853638, + "learning_rate": 0.0016913580246913581, + "loss": 5.9808, + "step": 665 + }, + { + "epoch": 1.479589003054707, + "grad_norm": 0.0478159636259079, + "learning_rate": 0.001688888888888889, + "loss": 6.1371, + "step": 666 + }, + { + "epoch": 1.4818106081643987, + "grad_norm": 0.03853146731853485, + "learning_rate": 0.0016864197530864198, + "loss": 6.1711, + "step": 667 + }, + { + "epoch": 1.4840322132740904, + "grad_norm": 0.041707154363393784, + "learning_rate": 0.0016839506172839508, + "loss": 6.2316, + "step": 668 + }, + { + "epoch": 1.4862538183837823, + "grad_norm": 0.031753845512866974, + "learning_rate": 0.0016814814814814813, + "loss": 6.3248, + "step": 669 + }, + { + "epoch": 1.488475423493474, + "grad_norm": 0.03383229672908783, + "learning_rate": 0.0016790123456790125, + "loss": 6.3733, + "step": 670 + }, + { + "epoch": 1.4906970286031658, + "grad_norm": 0.05415927246212959, + "learning_rate": 0.0016765432098765434, + "loss": 6.554, + "step": 671 + }, + { + "epoch": 1.4929186337128575, + "grad_norm": 0.05383379012346268, + "learning_rate": 0.0016740740740740741, + "loss": 6.3365, + "step": 672 + }, + { + "epoch": 1.4951402388225494, + "grad_norm": 0.03220098838210106, + "learning_rate": 0.001671604938271605, + "loss": 6.2972, + "step": 673 + }, + { + "epoch": 1.4973618439322411, + "grad_norm": 0.04361923038959503, + "learning_rate": 0.0016691358024691358, + "loss": 6.1342, + "step": 674 + }, + { + "epoch": 1.4995834490419329, + "grad_norm": 0.034433454275131226, + "learning_rate": 0.0016666666666666668, + "loss": 6.3875, + "step": 675 + }, + { + "epoch": 1.5018050541516246, + "grad_norm": 0.03784746676683426, + "learning_rate": 0.0016641975308641975, + "loss": 6.2219, + "step": 676 + }, + { + "epoch": 1.5040266592613163, + "grad_norm": 0.03338635712862015, + "learning_rate": 0.0016617283950617285, + "loss": 6.5494, + "step": 677 + }, + { + "epoch": 1.506248264371008, + "grad_norm": 0.03723134845495224, + "learning_rate": 0.0016592592592592592, + "loss": 6.1127, + "step": 678 + }, + { + "epoch": 1.5084698694806997, + "grad_norm": 0.04377841576933861, + "learning_rate": 0.0016567901234567901, + "loss": 6.3558, + "step": 679 + }, + { + "epoch": 1.5106914745903914, + "grad_norm": 0.05502425879240036, + "learning_rate": 0.001654320987654321, + "loss": 6.0453, + "step": 680 + }, + { + "epoch": 1.5129130797000832, + "grad_norm": 0.06568772345781326, + "learning_rate": 0.0016518518518518518, + "loss": 6.0028, + "step": 681 + }, + { + "epoch": 1.515134684809775, + "grad_norm": 0.033911559730768204, + "learning_rate": 0.0016493827160493828, + "loss": 6.1029, + "step": 682 + }, + { + "epoch": 1.5173562899194668, + "grad_norm": 0.038924045860767365, + "learning_rate": 0.0016469135802469135, + "loss": 5.99, + "step": 683 + }, + { + "epoch": 1.5195778950291585, + "grad_norm": 0.07635977864265442, + "learning_rate": 0.0016444444444444445, + "loss": 6.0661, + "step": 684 + }, + { + "epoch": 1.5217995001388505, + "grad_norm": 0.04160960763692856, + "learning_rate": 0.0016419753086419752, + "loss": 6.0989, + "step": 685 + }, + { + "epoch": 
1.5240211052485422, + "grad_norm": 0.04776043817400932, + "learning_rate": 0.0016395061728395062, + "loss": 6.38, + "step": 686 + }, + { + "epoch": 1.526242710358234, + "grad_norm": 0.038796450942754745, + "learning_rate": 0.001637037037037037, + "loss": 6.2388, + "step": 687 + }, + { + "epoch": 1.5284643154679256, + "grad_norm": 0.044839583337306976, + "learning_rate": 0.0016345679012345678, + "loss": 6.5456, + "step": 688 + }, + { + "epoch": 1.5306859205776173, + "grad_norm": 0.038193706423044205, + "learning_rate": 0.0016320987654320988, + "loss": 6.1368, + "step": 689 + }, + { + "epoch": 1.532907525687309, + "grad_norm": 0.03954074904322624, + "learning_rate": 0.0016296296296296295, + "loss": 6.209, + "step": 690 + }, + { + "epoch": 1.5351291307970008, + "grad_norm": 0.04989338293671608, + "learning_rate": 0.0016271604938271605, + "loss": 6.11, + "step": 691 + }, + { + "epoch": 1.5373507359066925, + "grad_norm": 0.03306099399924278, + "learning_rate": 0.0016246913580246912, + "loss": 6.079, + "step": 692 + }, + { + "epoch": 1.5395723410163842, + "grad_norm": 0.049217116087675095, + "learning_rate": 0.0016222222222222222, + "loss": 6.0589, + "step": 693 + }, + { + "epoch": 1.5417939461260761, + "grad_norm": 0.03254625201225281, + "learning_rate": 0.0016197530864197533, + "loss": 6.3458, + "step": 694 + }, + { + "epoch": 1.5440155512357678, + "grad_norm": 0.03594857081770897, + "learning_rate": 0.0016172839506172839, + "loss": 6.195, + "step": 695 + }, + { + "epoch": 1.5462371563454596, + "grad_norm": 0.07235965877771378, + "learning_rate": 0.001614814814814815, + "loss": 6.005, + "step": 696 + }, + { + "epoch": 1.5484587614551515, + "grad_norm": 0.06128450110554695, + "learning_rate": 0.0016123456790123455, + "loss": 6.5062, + "step": 697 + }, + { + "epoch": 1.5506803665648432, + "grad_norm": 0.03547682240605354, + "learning_rate": 0.0016098765432098767, + "loss": 6.0139, + "step": 698 + }, + { + "epoch": 1.552901971674535, + "grad_norm": 0.057652879506349564, + "learning_rate": 0.0016074074074074072, + "loss": 5.8171, + "step": 699 + }, + { + "epoch": 1.5551235767842266, + "grad_norm": 0.04588836058974266, + "learning_rate": 0.0016049382716049384, + "loss": 5.9613, + "step": 700 + }, + { + "epoch": 1.5573451818939184, + "grad_norm": 0.03351445123553276, + "learning_rate": 0.0016024691358024693, + "loss": 6.394, + "step": 701 + }, + { + "epoch": 1.55956678700361, + "grad_norm": 0.037374623119831085, + "learning_rate": 0.0016, + "loss": 5.9167, + "step": 702 + }, + { + "epoch": 1.5617883921133018, + "grad_norm": 0.04311612620949745, + "learning_rate": 0.001597530864197531, + "loss": 5.935, + "step": 703 + }, + { + "epoch": 1.5640099972229935, + "grad_norm": 0.04786458984017372, + "learning_rate": 0.0015950617283950618, + "loss": 6.3014, + "step": 704 + }, + { + "epoch": 1.5662316023326852, + "grad_norm": 0.0439060777425766, + "learning_rate": 0.0015925925925925927, + "loss": 6.1564, + "step": 705 + }, + { + "epoch": 1.5684532074423772, + "grad_norm": 0.04528342932462692, + "learning_rate": 0.0015901234567901234, + "loss": 6.2511, + "step": 706 + }, + { + "epoch": 1.5706748125520689, + "grad_norm": 0.04323355108499527, + "learning_rate": 0.0015876543209876544, + "loss": 6.2638, + "step": 707 + }, + { + "epoch": 1.5728964176617606, + "grad_norm": 0.04894525185227394, + "learning_rate": 0.0015851851851851851, + "loss": 6.0646, + "step": 708 + }, + { + "epoch": 1.5751180227714525, + "grad_norm": 0.044189661741256714, + "learning_rate": 0.001582716049382716, + "loss": 5.9077, + "step": 709 + 
}, + { + "epoch": 1.5773396278811442, + "grad_norm": 0.05571754276752472, + "learning_rate": 0.001580246913580247, + "loss": 6.4761, + "step": 710 + }, + { + "epoch": 1.579561232990836, + "grad_norm": 0.055017706006765366, + "learning_rate": 0.0015777777777777778, + "loss": 6.1056, + "step": 711 + }, + { + "epoch": 1.5817828381005277, + "grad_norm": 0.03247570991516113, + "learning_rate": 0.0015753086419753087, + "loss": 6.2023, + "step": 712 + }, + { + "epoch": 1.5840044432102194, + "grad_norm": 0.03337739408016205, + "learning_rate": 0.0015728395061728395, + "loss": 6.2494, + "step": 713 + }, + { + "epoch": 1.586226048319911, + "grad_norm": 0.03905110061168671, + "learning_rate": 0.0015703703703703704, + "loss": 6.1822, + "step": 714 + }, + { + "epoch": 1.5884476534296028, + "grad_norm": 0.050255563110113144, + "learning_rate": 0.0015679012345679011, + "loss": 5.9656, + "step": 715 + }, + { + "epoch": 1.5906692585392945, + "grad_norm": 0.040939997881650925, + "learning_rate": 0.001565432098765432, + "loss": 6.2584, + "step": 716 + }, + { + "epoch": 1.5928908636489862, + "grad_norm": 0.04037930443882942, + "learning_rate": 0.001562962962962963, + "loss": 6.2741, + "step": 717 + }, + { + "epoch": 1.5951124687586782, + "grad_norm": 0.04207153990864754, + "learning_rate": 0.0015604938271604938, + "loss": 6.4213, + "step": 718 + }, + { + "epoch": 1.59733407386837, + "grad_norm": 0.03851177915930748, + "learning_rate": 0.0015580246913580247, + "loss": 6.3191, + "step": 719 + }, + { + "epoch": 1.5995556789780616, + "grad_norm": 0.038728807121515274, + "learning_rate": 0.0015555555555555555, + "loss": 6.2651, + "step": 720 + }, + { + "epoch": 1.6017772840877536, + "grad_norm": 0.053951799869537354, + "learning_rate": 0.0015530864197530864, + "loss": 5.9007, + "step": 721 + }, + { + "epoch": 1.6039988891974453, + "grad_norm": 0.03671761229634285, + "learning_rate": 0.0015506172839506172, + "loss": 6.1562, + "step": 722 + }, + { + "epoch": 1.606220494307137, + "grad_norm": 0.04554244130849838, + "learning_rate": 0.001548148148148148, + "loss": 6.0253, + "step": 723 + }, + { + "epoch": 1.6084420994168287, + "grad_norm": 0.053976450115442276, + "learning_rate": 0.0015456790123456793, + "loss": 6.3573, + "step": 724 + }, + { + "epoch": 1.6106637045265204, + "grad_norm": 0.03967395797371864, + "learning_rate": 0.0015432098765432098, + "loss": 6.2846, + "step": 725 + }, + { + "epoch": 1.6128853096362121, + "grad_norm": 0.03604193776845932, + "learning_rate": 0.001540740740740741, + "loss": 6.2174, + "step": 726 + }, + { + "epoch": 1.6151069147459038, + "grad_norm": 0.040343984961509705, + "learning_rate": 0.0015382716049382715, + "loss": 6.4531, + "step": 727 + }, + { + "epoch": 1.6173285198555956, + "grad_norm": 0.04057557135820389, + "learning_rate": 0.0015358024691358026, + "loss": 6.0287, + "step": 728 + }, + { + "epoch": 1.6195501249652873, + "grad_norm": 0.049208082258701324, + "learning_rate": 0.0015333333333333332, + "loss": 6.3208, + "step": 729 + }, + { + "epoch": 1.6217717300749792, + "grad_norm": 0.05058284103870392, + "learning_rate": 0.0015308641975308643, + "loss": 6.2669, + "step": 730 + }, + { + "epoch": 1.623993335184671, + "grad_norm": 0.03900137171149254, + "learning_rate": 0.0015283950617283948, + "loss": 6.215, + "step": 731 + }, + { + "epoch": 1.6262149402943626, + "grad_norm": 0.028513478115200996, + "learning_rate": 0.001525925925925926, + "loss": 6.3372, + "step": 732 + }, + { + "epoch": 1.6284365454040546, + "grad_norm": 0.04768927022814751, + "learning_rate": 
0.001523456790123457, + "loss": 6.4372, + "step": 733 + }, + { + "epoch": 1.6306581505137463, + "grad_norm": 0.05876043811440468, + "learning_rate": 0.0015209876543209877, + "loss": 6.3986, + "step": 734 + }, + { + "epoch": 1.632879755623438, + "grad_norm": 0.03884493187069893, + "learning_rate": 0.0015185185185185187, + "loss": 6.0089, + "step": 735 + }, + { + "epoch": 1.6351013607331297, + "grad_norm": 0.044803451746702194, + "learning_rate": 0.0015160493827160494, + "loss": 6.1839, + "step": 736 + }, + { + "epoch": 1.6373229658428214, + "grad_norm": 0.03137345612049103, + "learning_rate": 0.0015135802469135803, + "loss": 6.3144, + "step": 737 + }, + { + "epoch": 1.6395445709525132, + "grad_norm": 0.03509042412042618, + "learning_rate": 0.001511111111111111, + "loss": 6.5495, + "step": 738 + }, + { + "epoch": 1.6417661760622049, + "grad_norm": 0.043359674513339996, + "learning_rate": 0.001508641975308642, + "loss": 6.0988, + "step": 739 + }, + { + "epoch": 1.6439877811718966, + "grad_norm": 0.05285673215985298, + "learning_rate": 0.001506172839506173, + "loss": 6.4118, + "step": 740 + }, + { + "epoch": 1.6462093862815883, + "grad_norm": 0.051178302615880966, + "learning_rate": 0.0015037037037037037, + "loss": 5.986, + "step": 741 + }, + { + "epoch": 1.6484309913912802, + "grad_norm": 0.03230026364326477, + "learning_rate": 0.0015012345679012347, + "loss": 6.0246, + "step": 742 + }, + { + "epoch": 1.650652596500972, + "grad_norm": 0.04651426896452904, + "learning_rate": 0.0014987654320987656, + "loss": 6.0249, + "step": 743 + }, + { + "epoch": 1.6528742016106637, + "grad_norm": 0.05399320647120476, + "learning_rate": 0.0014962962962962963, + "loss": 6.1514, + "step": 744 + }, + { + "epoch": 1.6550958067203556, + "grad_norm": 0.05591241270303726, + "learning_rate": 0.0014938271604938273, + "loss": 6.3867, + "step": 745 + }, + { + "epoch": 1.6573174118300473, + "grad_norm": 0.042732879519462585, + "learning_rate": 0.001491358024691358, + "loss": 6.1903, + "step": 746 + }, + { + "epoch": 1.659539016939739, + "grad_norm": 0.04537397250533104, + "learning_rate": 0.001488888888888889, + "loss": 6.1143, + "step": 747 + }, + { + "epoch": 1.6617606220494308, + "grad_norm": 0.04793582111597061, + "learning_rate": 0.0014864197530864197, + "loss": 6.221, + "step": 748 + }, + { + "epoch": 1.6639822271591225, + "grad_norm": 0.03770630434155464, + "learning_rate": 0.0014839506172839507, + "loss": 6.183, + "step": 749 + }, + { + "epoch": 1.6662038322688142, + "grad_norm": 0.04926915839314461, + "learning_rate": 0.0014814814814814814, + "loss": 5.9252, + "step": 750 + }, + { + "epoch": 1.668425437378506, + "grad_norm": 0.029345575720071793, + "learning_rate": 0.0014790123456790124, + "loss": 6.0441, + "step": 751 + }, + { + "epoch": 1.6706470424881976, + "grad_norm": 0.04108971729874611, + "learning_rate": 0.0014765432098765433, + "loss": 6.3289, + "step": 752 + }, + { + "epoch": 1.6728686475978893, + "grad_norm": 0.052369631826877594, + "learning_rate": 0.001474074074074074, + "loss": 6.1526, + "step": 753 + }, + { + "epoch": 1.6750902527075813, + "grad_norm": 0.059364162385463715, + "learning_rate": 0.001471604938271605, + "loss": 6.1037, + "step": 754 + }, + { + "epoch": 1.677311857817273, + "grad_norm": 0.04138586297631264, + "learning_rate": 0.0014691358024691357, + "loss": 5.9273, + "step": 755 + }, + { + "epoch": 1.6795334629269647, + "grad_norm": 0.0487748347222805, + "learning_rate": 0.0014666666666666667, + "loss": 5.9145, + "step": 756 + }, + { + "epoch": 1.6817550680366566, + "grad_norm": 
0.036047063767910004, + "learning_rate": 0.0014641975308641974, + "loss": 6.3341, + "step": 757 + }, + { + "epoch": 1.6839766731463484, + "grad_norm": 0.05469675362110138, + "learning_rate": 0.0014617283950617286, + "loss": 6.0284, + "step": 758 + }, + { + "epoch": 1.68619827825604, + "grad_norm": 0.04500849172472954, + "learning_rate": 0.0014592592592592593, + "loss": 6.2335, + "step": 759 + }, + { + "epoch": 1.6884198833657318, + "grad_norm": 0.04404061287641525, + "learning_rate": 0.0014567901234567903, + "loss": 6.2097, + "step": 760 + }, + { + "epoch": 1.6906414884754235, + "grad_norm": 0.04719531536102295, + "learning_rate": 0.001454320987654321, + "loss": 5.7834, + "step": 761 + }, + { + "epoch": 1.6928630935851152, + "grad_norm": 0.04584319889545441, + "learning_rate": 0.001451851851851852, + "loss": 6.0973, + "step": 762 + }, + { + "epoch": 1.695084698694807, + "grad_norm": 0.046094540506601334, + "learning_rate": 0.0014493827160493827, + "loss": 5.8519, + "step": 763 + }, + { + "epoch": 1.6973063038044987, + "grad_norm": 0.04250643029808998, + "learning_rate": 0.0014469135802469136, + "loss": 6.2503, + "step": 764 + }, + { + "epoch": 1.6995279089141904, + "grad_norm": 0.0384419821202755, + "learning_rate": 0.0014444444444444444, + "loss": 6.0693, + "step": 765 + }, + { + "epoch": 1.7017495140238823, + "grad_norm": 0.050014935433864594, + "learning_rate": 0.0014419753086419753, + "loss": 6.383, + "step": 766 + }, + { + "epoch": 1.703971119133574, + "grad_norm": 0.045423299074172974, + "learning_rate": 0.0014395061728395063, + "loss": 5.9695, + "step": 767 + }, + { + "epoch": 1.7061927242432657, + "grad_norm": 0.052475545555353165, + "learning_rate": 0.001437037037037037, + "loss": 6.1188, + "step": 768 + }, + { + "epoch": 1.7084143293529577, + "grad_norm": 0.04825114831328392, + "learning_rate": 0.001434567901234568, + "loss": 5.7932, + "step": 769 + }, + { + "epoch": 1.7106359344626494, + "grad_norm": 0.041027557104825974, + "learning_rate": 0.0014320987654320987, + "loss": 6.0139, + "step": 770 + }, + { + "epoch": 1.712857539572341, + "grad_norm": 0.054573532193899155, + "learning_rate": 0.0014296296296296297, + "loss": 6.2518, + "step": 771 + }, + { + "epoch": 1.7150791446820328, + "grad_norm": 0.03849795088171959, + "learning_rate": 0.0014271604938271604, + "loss": 6.1488, + "step": 772 + }, + { + "epoch": 1.7173007497917245, + "grad_norm": 0.038570936769247055, + "learning_rate": 0.0014246913580246916, + "loss": 6.1516, + "step": 773 + }, + { + "epoch": 1.7195223549014163, + "grad_norm": 0.03841586410999298, + "learning_rate": 0.0014222222222222223, + "loss": 6.0528, + "step": 774 + }, + { + "epoch": 1.721743960011108, + "grad_norm": 0.06937354803085327, + "learning_rate": 0.0014197530864197532, + "loss": 6.3445, + "step": 775 + }, + { + "epoch": 1.7239655651207997, + "grad_norm": 0.04137590900063515, + "learning_rate": 0.001417283950617284, + "loss": 5.8847, + "step": 776 + }, + { + "epoch": 1.7261871702304914, + "grad_norm": 0.0639619529247284, + "learning_rate": 0.001414814814814815, + "loss": 6.2012, + "step": 777 + }, + { + "epoch": 1.7284087753401833, + "grad_norm": 0.045112211257219315, + "learning_rate": 0.0014123456790123457, + "loss": 6.0829, + "step": 778 + }, + { + "epoch": 1.730630380449875, + "grad_norm": 0.04505884647369385, + "learning_rate": 0.0014098765432098766, + "loss": 6.307, + "step": 779 + }, + { + "epoch": 1.7328519855595668, + "grad_norm": 0.04798652231693268, + "learning_rate": 0.0014074074074074073, + "loss": 5.7065, + "step": 780 + }, + { + 
"epoch": 1.7350735906692587, + "grad_norm": 0.040401238948106766, + "learning_rate": 0.0014049382716049383, + "loss": 6.0646, + "step": 781 + }, + { + "epoch": 1.7372951957789504, + "grad_norm": 0.043497681617736816, + "learning_rate": 0.0014024691358024693, + "loss": 6.0617, + "step": 782 + }, + { + "epoch": 1.7395168008886421, + "grad_norm": 0.04268964007496834, + "learning_rate": 0.0014, + "loss": 5.9729, + "step": 783 + }, + { + "epoch": 1.7417384059983338, + "grad_norm": 0.042066771537065506, + "learning_rate": 0.001397530864197531, + "loss": 5.9399, + "step": 784 + }, + { + "epoch": 1.7439600111080256, + "grad_norm": 0.04098956659436226, + "learning_rate": 0.0013950617283950617, + "loss": 6.1768, + "step": 785 + }, + { + "epoch": 1.7461816162177173, + "grad_norm": 0.055857788771390915, + "learning_rate": 0.0013925925925925926, + "loss": 5.9895, + "step": 786 + }, + { + "epoch": 1.748403221327409, + "grad_norm": 0.05863477289676666, + "learning_rate": 0.0013901234567901234, + "loss": 5.966, + "step": 787 + }, + { + "epoch": 1.7506248264371007, + "grad_norm": 0.06156482547521591, + "learning_rate": 0.0013876543209876545, + "loss": 5.9458, + "step": 788 + }, + { + "epoch": 1.7528464315467924, + "grad_norm": 0.03802463412284851, + "learning_rate": 0.0013851851851851853, + "loss": 6.0276, + "step": 789 + }, + { + "epoch": 1.7550680366564844, + "grad_norm": 0.04245764762163162, + "learning_rate": 0.0013827160493827162, + "loss": 6.0092, + "step": 790 + }, + { + "epoch": 1.757289641766176, + "grad_norm": 0.04828885570168495, + "learning_rate": 0.001380246913580247, + "loss": 6.1139, + "step": 791 + }, + { + "epoch": 1.7595112468758678, + "grad_norm": 0.04812488704919815, + "learning_rate": 0.001377777777777778, + "loss": 6.4833, + "step": 792 + }, + { + "epoch": 1.7617328519855595, + "grad_norm": 0.0436619408428669, + "learning_rate": 0.0013753086419753086, + "loss": 6.0711, + "step": 793 + }, + { + "epoch": 1.7639544570952514, + "grad_norm": 0.04308826103806496, + "learning_rate": 0.0013728395061728396, + "loss": 6.2644, + "step": 794 + }, + { + "epoch": 1.7661760622049432, + "grad_norm": 0.05595472455024719, + "learning_rate": 0.0013703703703703703, + "loss": 6.1844, + "step": 795 + }, + { + "epoch": 1.7683976673146349, + "grad_norm": 0.03139431029558182, + "learning_rate": 0.0013679012345679013, + "loss": 6.2593, + "step": 796 + }, + { + "epoch": 1.7706192724243266, + "grad_norm": 0.04515387490391731, + "learning_rate": 0.0013654320987654322, + "loss": 6.0786, + "step": 797 + }, + { + "epoch": 1.7728408775340183, + "grad_norm": 0.03661363571882248, + "learning_rate": 0.001362962962962963, + "loss": 6.3232, + "step": 798 + }, + { + "epoch": 1.77506248264371, + "grad_norm": 0.030566265806555748, + "learning_rate": 0.001360493827160494, + "loss": 6.0365, + "step": 799 + }, + { + "epoch": 1.7772840877534017, + "grad_norm": 0.0449000783264637, + "learning_rate": 0.0013580246913580246, + "loss": 6.3158, + "step": 800 + }, + { + "epoch": 1.7795056928630935, + "grad_norm": 0.04717319831252098, + "learning_rate": 0.0013555555555555556, + "loss": 6.0393, + "step": 801 + }, + { + "epoch": 1.7817272979727852, + "grad_norm": 0.06261464208364487, + "learning_rate": 0.0013530864197530863, + "loss": 6.0147, + "step": 802 + }, + { + "epoch": 1.783948903082477, + "grad_norm": 0.044654421508312225, + "learning_rate": 0.0013506172839506175, + "loss": 6.1854, + "step": 803 + }, + { + "epoch": 1.7861705081921688, + "grad_norm": 0.04214232787489891, + "learning_rate": 0.0013481481481481482, + "loss": 6.1411, + 
"step": 804 + }, + { + "epoch": 1.7883921133018605, + "grad_norm": 0.06957382708787918, + "learning_rate": 0.0013456790123456792, + "loss": 5.9172, + "step": 805 + }, + { + "epoch": 1.7906137184115525, + "grad_norm": 0.035519879311323166, + "learning_rate": 0.00134320987654321, + "loss": 6.243, + "step": 806 + }, + { + "epoch": 1.7928353235212442, + "grad_norm": 0.04761626198887825, + "learning_rate": 0.0013407407407407409, + "loss": 6.2272, + "step": 807 + }, + { + "epoch": 1.795056928630936, + "grad_norm": 0.06698905676603317, + "learning_rate": 0.0013382716049382716, + "loss": 6.0552, + "step": 808 + }, + { + "epoch": 1.7972785337406276, + "grad_norm": 0.03679412230849266, + "learning_rate": 0.0013358024691358023, + "loss": 6.0877, + "step": 809 + }, + { + "epoch": 1.7995001388503193, + "grad_norm": 0.04155512899160385, + "learning_rate": 0.0013333333333333333, + "loss": 6.0179, + "step": 810 + }, + { + "epoch": 1.801721743960011, + "grad_norm": 0.044784773141145706, + "learning_rate": 0.0013308641975308642, + "loss": 6.2279, + "step": 811 + }, + { + "epoch": 1.8039433490697028, + "grad_norm": 0.04515000805258751, + "learning_rate": 0.0013283950617283952, + "loss": 6.0754, + "step": 812 + }, + { + "epoch": 1.8061649541793945, + "grad_norm": 0.06366923451423645, + "learning_rate": 0.001325925925925926, + "loss": 6.102, + "step": 813 + }, + { + "epoch": 1.8083865592890862, + "grad_norm": 0.04164503514766693, + "learning_rate": 0.0013234567901234569, + "loss": 5.9215, + "step": 814 + }, + { + "epoch": 1.8106081643987781, + "grad_norm": 0.059181466698646545, + "learning_rate": 0.0013209876543209876, + "loss": 6.1557, + "step": 815 + }, + { + "epoch": 1.8128297695084699, + "grad_norm": 0.04095742478966713, + "learning_rate": 0.0013185185185185186, + "loss": 5.9996, + "step": 816 + }, + { + "epoch": 1.8150513746181616, + "grad_norm": 0.057590506970882416, + "learning_rate": 0.0013160493827160493, + "loss": 5.9219, + "step": 817 + }, + { + "epoch": 1.8172729797278535, + "grad_norm": 0.039277397096157074, + "learning_rate": 0.0013135802469135802, + "loss": 5.9991, + "step": 818 + }, + { + "epoch": 1.8194945848375452, + "grad_norm": 0.0515102855861187, + "learning_rate": 0.0013111111111111112, + "loss": 5.9543, + "step": 819 + }, + { + "epoch": 1.821716189947237, + "grad_norm": 0.06030356138944626, + "learning_rate": 0.001308641975308642, + "loss": 6.1968, + "step": 820 + }, + { + "epoch": 1.8239377950569287, + "grad_norm": 0.042879413813352585, + "learning_rate": 0.0013061728395061729, + "loss": 6.0359, + "step": 821 + }, + { + "epoch": 1.8261594001666204, + "grad_norm": 0.049642011523246765, + "learning_rate": 0.0013037037037037036, + "loss": 6.2051, + "step": 822 + }, + { + "epoch": 1.828381005276312, + "grad_norm": 0.04189665988087654, + "learning_rate": 0.0013012345679012346, + "loss": 6.1738, + "step": 823 + }, + { + "epoch": 1.8306026103860038, + "grad_norm": 0.03751196712255478, + "learning_rate": 0.0012987654320987653, + "loss": 6.135, + "step": 824 + }, + { + "epoch": 1.8328242154956955, + "grad_norm": 0.03689640015363693, + "learning_rate": 0.0012962962962962963, + "loss": 6.1697, + "step": 825 + }, + { + "epoch": 1.8350458206053872, + "grad_norm": 0.059797253459692, + "learning_rate": 0.0012938271604938272, + "loss": 5.9132, + "step": 826 + }, + { + "epoch": 1.8372674257150792, + "grad_norm": 0.054130177944898605, + "learning_rate": 0.0012913580246913582, + "loss": 6.1891, + "step": 827 + }, + { + "epoch": 1.8394890308247709, + "grad_norm": 0.044893085956573486, + "learning_rate": 
0.001288888888888889, + "loss": 5.7673, + "step": 828 + }, + { + "epoch": 1.8417106359344626, + "grad_norm": 0.060131531208753586, + "learning_rate": 0.0012864197530864198, + "loss": 5.9341, + "step": 829 + }, + { + "epoch": 1.8439322410441545, + "grad_norm": 0.06052136793732643, + "learning_rate": 0.0012839506172839506, + "loss": 5.9462, + "step": 830 + }, + { + "epoch": 1.8461538461538463, + "grad_norm": 0.04839181900024414, + "learning_rate": 0.0012814814814814815, + "loss": 5.9377, + "step": 831 + }, + { + "epoch": 1.848375451263538, + "grad_norm": 0.04253244027495384, + "learning_rate": 0.0012790123456790123, + "loss": 5.9653, + "step": 832 + }, + { + "epoch": 1.8505970563732297, + "grad_norm": 0.07292971014976501, + "learning_rate": 0.0012765432098765432, + "loss": 5.8236, + "step": 833 + }, + { + "epoch": 1.8528186614829214, + "grad_norm": 0.04111470654606819, + "learning_rate": 0.0012740740740740742, + "loss": 6.0388, + "step": 834 + }, + { + "epoch": 1.8550402665926131, + "grad_norm": 0.04093720018863678, + "learning_rate": 0.001271604938271605, + "loss": 5.8761, + "step": 835 + }, + { + "epoch": 1.8572618717023048, + "grad_norm": 0.04653589800000191, + "learning_rate": 0.0012691358024691359, + "loss": 5.9278, + "step": 836 + }, + { + "epoch": 1.8594834768119965, + "grad_norm": 0.05398708954453468, + "learning_rate": 0.0012666666666666666, + "loss": 5.9163, + "step": 837 + }, + { + "epoch": 1.8617050819216883, + "grad_norm": 0.038633935153484344, + "learning_rate": 0.0012641975308641975, + "loss": 5.8916, + "step": 838 + }, + { + "epoch": 1.8639266870313802, + "grad_norm": 0.04764648526906967, + "learning_rate": 0.0012617283950617283, + "loss": 6.2452, + "step": 839 + }, + { + "epoch": 1.866148292141072, + "grad_norm": 0.052322935312986374, + "learning_rate": 0.0012592592592592592, + "loss": 5.9655, + "step": 840 + }, + { + "epoch": 1.8683698972507636, + "grad_norm": 0.03198683634400368, + "learning_rate": 0.0012567901234567902, + "loss": 6.2927, + "step": 841 + }, + { + "epoch": 1.8705915023604556, + "grad_norm": 0.04638659209012985, + "learning_rate": 0.0012543209876543211, + "loss": 5.9515, + "step": 842 + }, + { + "epoch": 1.8728131074701473, + "grad_norm": 0.03224872052669525, + "learning_rate": 0.0012518518518518519, + "loss": 6.0, + "step": 843 + }, + { + "epoch": 1.875034712579839, + "grad_norm": 0.04067224636673927, + "learning_rate": 0.0012493827160493828, + "loss": 5.9006, + "step": 844 + }, + { + "epoch": 1.8772563176895307, + "grad_norm": 0.03820433095097542, + "learning_rate": 0.0012469135802469136, + "loss": 5.9431, + "step": 845 + }, + { + "epoch": 1.8794779227992224, + "grad_norm": 0.04805198311805725, + "learning_rate": 0.0012444444444444445, + "loss": 6.2114, + "step": 846 + }, + { + "epoch": 1.8816995279089141, + "grad_norm": 0.060539450496435165, + "learning_rate": 0.0012419753086419752, + "loss": 6.051, + "step": 847 + }, + { + "epoch": 1.8839211330186059, + "grad_norm": 0.05590864643454552, + "learning_rate": 0.0012395061728395062, + "loss": 6.0311, + "step": 848 + }, + { + "epoch": 1.8861427381282976, + "grad_norm": 0.04005192592740059, + "learning_rate": 0.0012370370370370371, + "loss": 5.821, + "step": 849 + }, + { + "epoch": 1.8883643432379893, + "grad_norm": 0.04435478150844574, + "learning_rate": 0.0012345679012345679, + "loss": 6.0769, + "step": 850 + }, + { + "epoch": 1.8905859483476812, + "grad_norm": 0.05554912984371185, + "learning_rate": 0.0012320987654320988, + "loss": 6.1892, + "step": 851 + }, + { + "epoch": 1.892807553457373, + "grad_norm": 
0.05200256034731865, + "learning_rate": 0.0012296296296296296, + "loss": 5.8477, + "step": 852 + }, + { + "epoch": 1.8950291585670647, + "grad_norm": 0.04454224184155464, + "learning_rate": 0.0012271604938271605, + "loss": 5.9329, + "step": 853 + }, + { + "epoch": 1.8972507636767566, + "grad_norm": 0.05036364868283272, + "learning_rate": 0.0012246913580246912, + "loss": 6.3992, + "step": 854 + }, + { + "epoch": 1.8994723687864483, + "grad_norm": 0.05385028198361397, + "learning_rate": 0.0012222222222222222, + "loss": 6.1398, + "step": 855 + }, + { + "epoch": 1.90169397389614, + "grad_norm": 0.04300863295793533, + "learning_rate": 0.0012197530864197532, + "loss": 5.894, + "step": 856 + }, + { + "epoch": 1.9039155790058317, + "grad_norm": 0.04779009521007538, + "learning_rate": 0.001217283950617284, + "loss": 5.9571, + "step": 857 + }, + { + "epoch": 1.9061371841155235, + "grad_norm": 0.03509556129574776, + "learning_rate": 0.0012148148148148148, + "loss": 6.0471, + "step": 858 + }, + { + "epoch": 1.9083587892252152, + "grad_norm": 0.044372253119945526, + "learning_rate": 0.0012123456790123458, + "loss": 5.7884, + "step": 859 + }, + { + "epoch": 1.910580394334907, + "grad_norm": 0.04641924053430557, + "learning_rate": 0.0012098765432098765, + "loss": 6.0532, + "step": 860 + }, + { + "epoch": 1.9128019994445986, + "grad_norm": 0.05756306275725365, + "learning_rate": 0.0012074074074074075, + "loss": 6.0295, + "step": 861 + }, + { + "epoch": 1.9150236045542903, + "grad_norm": 0.05073798447847366, + "learning_rate": 0.0012049382716049382, + "loss": 5.8082, + "step": 862 + }, + { + "epoch": 1.9172452096639823, + "grad_norm": 0.05037453770637512, + "learning_rate": 0.0012024691358024692, + "loss": 5.9952, + "step": 863 + }, + { + "epoch": 1.919466814773674, + "grad_norm": 0.047901567071676254, + "learning_rate": 0.0012000000000000001, + "loss": 6.0192, + "step": 864 + }, + { + "epoch": 1.9216884198833657, + "grad_norm": 0.05425766482949257, + "learning_rate": 0.0011975308641975308, + "loss": 5.9219, + "step": 865 + }, + { + "epoch": 1.9239100249930576, + "grad_norm": 0.05082131549715996, + "learning_rate": 0.0011950617283950618, + "loss": 5.8619, + "step": 866 + }, + { + "epoch": 1.9261316301027493, + "grad_norm": 0.04783160984516144, + "learning_rate": 0.0011925925925925925, + "loss": 6.1955, + "step": 867 + }, + { + "epoch": 1.928353235212441, + "grad_norm": 0.07691506296396255, + "learning_rate": 0.0011901234567901235, + "loss": 6.1067, + "step": 868 + }, + { + "epoch": 1.9305748403221328, + "grad_norm": 0.0328618660569191, + "learning_rate": 0.0011876543209876542, + "loss": 6.248, + "step": 869 + }, + { + "epoch": 1.9327964454318245, + "grad_norm": 0.0480203740298748, + "learning_rate": 0.0011851851851851852, + "loss": 5.823, + "step": 870 + }, + { + "epoch": 1.9350180505415162, + "grad_norm": 0.04856712743639946, + "learning_rate": 0.0011827160493827161, + "loss": 6.0644, + "step": 871 + }, + { + "epoch": 1.937239655651208, + "grad_norm": 0.06229954957962036, + "learning_rate": 0.001180246913580247, + "loss": 6.4556, + "step": 872 + }, + { + "epoch": 1.9394612607608996, + "grad_norm": 0.06782695651054382, + "learning_rate": 0.0011777777777777778, + "loss": 5.7211, + "step": 873 + }, + { + "epoch": 1.9416828658705914, + "grad_norm": 0.04558860510587692, + "learning_rate": 0.0011753086419753088, + "loss": 5.8684, + "step": 874 + }, + { + "epoch": 1.9439044709802833, + "grad_norm": 0.05876922979950905, + "learning_rate": 0.0011728395061728395, + "loss": 5.9356, + "step": 875 + }, + { + "epoch": 
1.946126076089975, + "grad_norm": 0.05528077483177185, + "learning_rate": 0.0011703703703703704, + "loss": 6.09, + "step": 876 + }, + { + "epoch": 1.9483476811996667, + "grad_norm": 0.05866006761789322, + "learning_rate": 0.0011679012345679012, + "loss": 6.0989, + "step": 877 + }, + { + "epoch": 1.9505692863093587, + "grad_norm": 0.06259995698928833, + "learning_rate": 0.0011654320987654321, + "loss": 5.9589, + "step": 878 + }, + { + "epoch": 1.9527908914190504, + "grad_norm": 0.046732097864151, + "learning_rate": 0.001162962962962963, + "loss": 6.0792, + "step": 879 + }, + { + "epoch": 1.955012496528742, + "grad_norm": 0.04859071597456932, + "learning_rate": 0.0011604938271604938, + "loss": 5.9421, + "step": 880 + }, + { + "epoch": 1.9572341016384338, + "grad_norm": 0.07568682730197906, + "learning_rate": 0.0011580246913580248, + "loss": 6.0496, + "step": 881 + }, + { + "epoch": 1.9594557067481255, + "grad_norm": 0.07273954898118973, + "learning_rate": 0.0011555555555555555, + "loss": 6.1588, + "step": 882 + }, + { + "epoch": 1.9616773118578172, + "grad_norm": 0.048970118165016174, + "learning_rate": 0.0011530864197530865, + "loss": 5.9395, + "step": 883 + }, + { + "epoch": 1.963898916967509, + "grad_norm": 0.05090354010462761, + "learning_rate": 0.0011506172839506172, + "loss": 5.8694, + "step": 884 + }, + { + "epoch": 1.9661205220772007, + "grad_norm": 0.044794149696826935, + "learning_rate": 0.0011481481481481481, + "loss": 5.8916, + "step": 885 + }, + { + "epoch": 1.9683421271868924, + "grad_norm": 0.05274913087487221, + "learning_rate": 0.001145679012345679, + "loss": 6.1292, + "step": 886 + }, + { + "epoch": 1.9705637322965843, + "grad_norm": 0.05570974200963974, + "learning_rate": 0.00114320987654321, + "loss": 6.0561, + "step": 887 + }, + { + "epoch": 1.972785337406276, + "grad_norm": 0.059043627232313156, + "learning_rate": 0.0011407407407407408, + "loss": 6.1266, + "step": 888 + }, + { + "epoch": 1.9750069425159678, + "grad_norm": 0.051148053258657455, + "learning_rate": 0.0011382716049382717, + "loss": 6.0357, + "step": 889 + }, + { + "epoch": 1.9772285476256597, + "grad_norm": 0.08695517480373383, + "learning_rate": 0.0011358024691358025, + "loss": 5.7319, + "step": 890 + }, + { + "epoch": 1.9794501527353514, + "grad_norm": 0.05379338189959526, + "learning_rate": 0.0011333333333333334, + "loss": 5.6979, + "step": 891 + }, + { + "epoch": 1.9816717578450431, + "grad_norm": 0.044213637709617615, + "learning_rate": 0.0011308641975308641, + "loss": 6.0704, + "step": 892 + }, + { + "epoch": 1.9838933629547348, + "grad_norm": 0.04649046063423157, + "learning_rate": 0.001128395061728395, + "loss": 6.2296, + "step": 893 + }, + { + "epoch": 1.9861149680644266, + "grad_norm": 0.05491134151816368, + "learning_rate": 0.001125925925925926, + "loss": 6.1292, + "step": 894 + }, + { + "epoch": 1.9883365731741183, + "grad_norm": 0.07654581218957901, + "learning_rate": 0.0011234567901234568, + "loss": 6.1115, + "step": 895 + }, + { + "epoch": 1.99055817828381, + "grad_norm": 0.0780540183186531, + "learning_rate": 0.0011209876543209877, + "loss": 6.0279, + "step": 896 + }, + { + "epoch": 1.9927797833935017, + "grad_norm": 0.06326672434806824, + "learning_rate": 0.0011185185185185185, + "loss": 6.0289, + "step": 897 + }, + { + "epoch": 1.9950013885031934, + "grad_norm": 0.042030349373817444, + "learning_rate": 0.0011160493827160494, + "loss": 6.1248, + "step": 898 + }, + { + "epoch": 1.9972229936128854, + "grad_norm": 0.04940253868699074, + "learning_rate": 0.0011135802469135802, + "loss": 5.8414, 
+ "step": 899 + }, + { + "epoch": 1.999444598722577, + "grad_norm": 0.04751197621226311, + "learning_rate": 0.0011111111111111111, + "loss": 5.9401, + "step": 900 + }, + { + "epoch": 2.001666203832269, + "grad_norm": 0.046355050057172775, + "learning_rate": 0.001108641975308642, + "loss": 6.3377, + "step": 901 + }, + { + "epoch": 2.0038878089419607, + "grad_norm": 0.05937094986438751, + "learning_rate": 0.001106172839506173, + "loss": 5.6215, + "step": 902 + }, + { + "epoch": 2.0061094140516524, + "grad_norm": 0.05452687293291092, + "learning_rate": 0.0011037037037037037, + "loss": 5.8461, + "step": 903 + }, + { + "epoch": 2.008331019161344, + "grad_norm": 0.058435868471860886, + "learning_rate": 0.0011012345679012347, + "loss": 6.185, + "step": 904 + }, + { + "epoch": 2.010552624271036, + "grad_norm": 0.053980663418769836, + "learning_rate": 0.0010987654320987654, + "loss": 5.6601, + "step": 905 + }, + { + "epoch": 2.0127742293807276, + "grad_norm": 0.06021531671285629, + "learning_rate": 0.0010962962962962964, + "loss": 6.1838, + "step": 906 + }, + { + "epoch": 2.0149958344904193, + "grad_norm": 0.06973876804113388, + "learning_rate": 0.0010938271604938271, + "loss": 6.1234, + "step": 907 + }, + { + "epoch": 2.017217439600111, + "grad_norm": 0.049507174640893936, + "learning_rate": 0.001091358024691358, + "loss": 6.0305, + "step": 908 + }, + { + "epoch": 2.0194390447098027, + "grad_norm": 0.04300854727625847, + "learning_rate": 0.001088888888888889, + "loss": 5.7729, + "step": 909 + }, + { + "epoch": 2.0216606498194944, + "grad_norm": 0.049711547791957855, + "learning_rate": 0.0010864197530864198, + "loss": 6.3389, + "step": 910 + }, + { + "epoch": 2.023882254929186, + "grad_norm": 0.061491284519433975, + "learning_rate": 0.0010839506172839507, + "loss": 6.0795, + "step": 911 + }, + { + "epoch": 2.026103860038878, + "grad_norm": 0.04442786052823067, + "learning_rate": 0.0010814814814814814, + "loss": 6.3198, + "step": 912 + }, + { + "epoch": 2.02832546514857, + "grad_norm": 0.05021671578288078, + "learning_rate": 0.0010790123456790124, + "loss": 6.0581, + "step": 913 + }, + { + "epoch": 2.0305470702582618, + "grad_norm": 0.06689216941595078, + "learning_rate": 0.0010765432098765431, + "loss": 5.8025, + "step": 914 + }, + { + "epoch": 2.0327686753679535, + "grad_norm": 0.03939792141318321, + "learning_rate": 0.001074074074074074, + "loss": 5.6586, + "step": 915 + }, + { + "epoch": 2.034990280477645, + "grad_norm": 0.035246171057224274, + "learning_rate": 0.0010716049382716048, + "loss": 5.8321, + "step": 916 + }, + { + "epoch": 2.037211885587337, + "grad_norm": 0.06485025584697723, + "learning_rate": 0.001069135802469136, + "loss": 6.0704, + "step": 917 + }, + { + "epoch": 2.0394334906970286, + "grad_norm": 0.04535793140530586, + "learning_rate": 0.0010666666666666667, + "loss": 6.3523, + "step": 918 + }, + { + "epoch": 2.0416550958067203, + "grad_norm": 0.03397076949477196, + "learning_rate": 0.0010641975308641977, + "loss": 5.8642, + "step": 919 + }, + { + "epoch": 2.043876700916412, + "grad_norm": 0.04708784073591232, + "learning_rate": 0.0010617283950617284, + "loss": 5.8981, + "step": 920 + }, + { + "epoch": 2.0460983060261038, + "grad_norm": 0.03657303377985954, + "learning_rate": 0.0010592592592592594, + "loss": 5.9779, + "step": 921 + }, + { + "epoch": 2.0483199111357955, + "grad_norm": 0.05296921357512474, + "learning_rate": 0.00105679012345679, + "loss": 5.8182, + "step": 922 + }, + { + "epoch": 2.050541516245487, + "grad_norm": 0.04714735597372055, + "learning_rate": 
0.001054320987654321, + "loss": 5.9424, + "step": 923 + }, + { + "epoch": 2.052763121355179, + "grad_norm": 0.04999284818768501, + "learning_rate": 0.001051851851851852, + "loss": 6.2192, + "step": 924 + }, + { + "epoch": 2.054984726464871, + "grad_norm": 0.03968612477183342, + "learning_rate": 0.0010493827160493827, + "loss": 6.096, + "step": 925 + }, + { + "epoch": 2.057206331574563, + "grad_norm": 0.038185521960258484, + "learning_rate": 0.0010469135802469137, + "loss": 6.2274, + "step": 926 + }, + { + "epoch": 2.0594279366842545, + "grad_norm": 0.07575882226228714, + "learning_rate": 0.0010444444444444444, + "loss": 6.0759, + "step": 927 + }, + { + "epoch": 2.061649541793946, + "grad_norm": 0.05830669403076172, + "learning_rate": 0.0010419753086419754, + "loss": 5.8577, + "step": 928 + }, + { + "epoch": 2.063871146903638, + "grad_norm": 0.05522337928414345, + "learning_rate": 0.001039506172839506, + "loss": 5.9179, + "step": 929 + }, + { + "epoch": 2.0660927520133296, + "grad_norm": 0.04078824445605278, + "learning_rate": 0.001037037037037037, + "loss": 6.1919, + "step": 930 + }, + { + "epoch": 2.0683143571230214, + "grad_norm": 0.0531235933303833, + "learning_rate": 0.0010345679012345678, + "loss": 6.1473, + "step": 931 + }, + { + "epoch": 2.070535962232713, + "grad_norm": 0.06885974854230881, + "learning_rate": 0.001032098765432099, + "loss": 5.994, + "step": 932 + }, + { + "epoch": 2.072757567342405, + "grad_norm": 0.051051583141088486, + "learning_rate": 0.0010296296296296297, + "loss": 6.0154, + "step": 933 + }, + { + "epoch": 2.0749791724520965, + "grad_norm": 0.05040254071354866, + "learning_rate": 0.0010271604938271606, + "loss": 5.911, + "step": 934 + }, + { + "epoch": 2.0772007775617882, + "grad_norm": 0.048295751214027405, + "learning_rate": 0.0010246913580246914, + "loss": 6.2185, + "step": 935 + }, + { + "epoch": 2.07942238267148, + "grad_norm": 0.07192625105381012, + "learning_rate": 0.0010222222222222223, + "loss": 6.063, + "step": 936 + }, + { + "epoch": 2.081643987781172, + "grad_norm": 0.06737194210290909, + "learning_rate": 0.001019753086419753, + "loss": 5.9556, + "step": 937 + }, + { + "epoch": 2.083865592890864, + "grad_norm": 0.052287761121988297, + "learning_rate": 0.001017283950617284, + "loss": 5.891, + "step": 938 + }, + { + "epoch": 2.0860871980005555, + "grad_norm": 0.0601489432156086, + "learning_rate": 0.001014814814814815, + "loss": 5.9905, + "step": 939 + }, + { + "epoch": 2.0883088031102472, + "grad_norm": 0.04830304533243179, + "learning_rate": 0.0010123456790123457, + "loss": 5.9515, + "step": 940 + }, + { + "epoch": 2.090530408219939, + "grad_norm": 0.05534185469150543, + "learning_rate": 0.0010098765432098766, + "loss": 5.7164, + "step": 941 + }, + { + "epoch": 2.0927520133296307, + "grad_norm": 0.048778556287288666, + "learning_rate": 0.0010074074074074074, + "loss": 5.9678, + "step": 942 + }, + { + "epoch": 2.0949736184393224, + "grad_norm": 0.09851929545402527, + "learning_rate": 0.0010049382716049383, + "loss": 5.5509, + "step": 943 + }, + { + "epoch": 2.097195223549014, + "grad_norm": 0.05525709316134453, + "learning_rate": 0.001002469135802469, + "loss": 6.44, + "step": 944 + }, + { + "epoch": 2.099416828658706, + "grad_norm": 0.04130396246910095, + "learning_rate": 0.001, + "loss": 5.8139, + "step": 945 + }, + { + "epoch": 2.1016384337683975, + "grad_norm": 0.04005426913499832, + "learning_rate": 0.0009975308641975308, + "loss": 5.9008, + "step": 946 + }, + { + "epoch": 2.1038600388780893, + "grad_norm": 0.05195621773600578, + 
"learning_rate": 0.000995061728395062, + "loss": 5.9189, + "step": 947 + }, + { + "epoch": 2.106081643987781, + "grad_norm": 0.05769798904657364, + "learning_rate": 0.0009925925925925927, + "loss": 5.731, + "step": 948 + }, + { + "epoch": 2.108303249097473, + "grad_norm": 0.05508211627602577, + "learning_rate": 0.0009901234567901234, + "loss": 5.7188, + "step": 949 + }, + { + "epoch": 2.110524854207165, + "grad_norm": 0.044090636074543, + "learning_rate": 0.0009876543209876543, + "loss": 6.1145, + "step": 950 + }, + { + "epoch": 2.1127464593168566, + "grad_norm": 0.07029680907726288, + "learning_rate": 0.000985185185185185, + "loss": 6.0173, + "step": 951 + }, + { + "epoch": 2.1149680644265483, + "grad_norm": 0.051356032490730286, + "learning_rate": 0.000982716049382716, + "loss": 6.0828, + "step": 952 + }, + { + "epoch": 2.11718966953624, + "grad_norm": 0.06374996155500412, + "learning_rate": 0.0009802469135802468, + "loss": 5.9538, + "step": 953 + }, + { + "epoch": 2.1194112746459317, + "grad_norm": 0.05067945644259453, + "learning_rate": 0.000977777777777778, + "loss": 6.1191, + "step": 954 + }, + { + "epoch": 2.1216328797556234, + "grad_norm": 0.04271899163722992, + "learning_rate": 0.0009753086419753087, + "loss": 6.282, + "step": 955 + }, + { + "epoch": 2.123854484865315, + "grad_norm": 0.050401248037815094, + "learning_rate": 0.0009728395061728395, + "loss": 5.889, + "step": 956 + }, + { + "epoch": 2.126076089975007, + "grad_norm": 0.06142055243253708, + "learning_rate": 0.0009703703703703704, + "loss": 5.8915, + "step": 957 + }, + { + "epoch": 2.1282976950846986, + "grad_norm": 0.045352134853601456, + "learning_rate": 0.0009679012345679012, + "loss": 6.2175, + "step": 958 + }, + { + "epoch": 2.1305193001943903, + "grad_norm": 0.05124196782708168, + "learning_rate": 0.000965432098765432, + "loss": 6.108, + "step": 959 + }, + { + "epoch": 2.132740905304082, + "grad_norm": 0.07164105027914047, + "learning_rate": 0.0009629629629629629, + "loss": 5.7826, + "step": 960 + }, + { + "epoch": 2.134962510413774, + "grad_norm": 0.08650267869234085, + "learning_rate": 0.0009604938271604937, + "loss": 5.8555, + "step": 961 + }, + { + "epoch": 2.137184115523466, + "grad_norm": 0.09889843314886093, + "learning_rate": 0.0009580246913580248, + "loss": 5.9192, + "step": 962 + }, + { + "epoch": 2.1394057206331576, + "grad_norm": 0.085551917552948, + "learning_rate": 0.0009555555555555556, + "loss": 5.747, + "step": 963 + }, + { + "epoch": 2.1416273257428493, + "grad_norm": 0.0767153725028038, + "learning_rate": 0.0009530864197530865, + "loss": 6.2098, + "step": 964 + }, + { + "epoch": 2.143848930852541, + "grad_norm": 0.044550374150276184, + "learning_rate": 0.0009506172839506173, + "loss": 5.8646, + "step": 965 + }, + { + "epoch": 2.1460705359622327, + "grad_norm": 0.05971762165427208, + "learning_rate": 0.0009481481481481482, + "loss": 5.8602, + "step": 966 + }, + { + "epoch": 2.1482921410719245, + "grad_norm": 0.04591561481356621, + "learning_rate": 0.000945679012345679, + "loss": 5.8252, + "step": 967 + }, + { + "epoch": 2.150513746181616, + "grad_norm": 0.08381897956132889, + "learning_rate": 0.0009432098765432098, + "loss": 5.9186, + "step": 968 + }, + { + "epoch": 2.152735351291308, + "grad_norm": 0.0634213387966156, + "learning_rate": 0.0009407407407407408, + "loss": 5.8926, + "step": 969 + }, + { + "epoch": 2.1549569564009996, + "grad_norm": 0.05233628675341606, + "learning_rate": 0.0009382716049382716, + "loss": 5.6604, + "step": 970 + }, + { + "epoch": 2.1571785615106913, + "grad_norm": 
0.05524004250764847, + "learning_rate": 0.0009358024691358025, + "loss": 6.064, + "step": 971 + }, + { + "epoch": 2.159400166620383, + "grad_norm": 0.047200750559568405, + "learning_rate": 0.0009333333333333333, + "loss": 5.7086, + "step": 972 + }, + { + "epoch": 2.1616217717300747, + "grad_norm": 0.044266339391469955, + "learning_rate": 0.0009308641975308642, + "loss": 5.8878, + "step": 973 + }, + { + "epoch": 2.163843376839767, + "grad_norm": 0.0752163678407669, + "learning_rate": 0.000928395061728395, + "loss": 5.9089, + "step": 974 + }, + { + "epoch": 2.1660649819494586, + "grad_norm": 0.049727752804756165, + "learning_rate": 0.0009259259259259259, + "loss": 6.0005, + "step": 975 + }, + { + "epoch": 2.1682865870591503, + "grad_norm": 0.05456957221031189, + "learning_rate": 0.0009234567901234567, + "loss": 6.0706, + "step": 976 + }, + { + "epoch": 2.170508192168842, + "grad_norm": 0.05987009406089783, + "learning_rate": 0.0009209876543209878, + "loss": 5.9255, + "step": 977 + }, + { + "epoch": 2.1727297972785338, + "grad_norm": 0.06586159765720367, + "learning_rate": 0.0009185185185185186, + "loss": 6.0354, + "step": 978 + }, + { + "epoch": 2.1749514023882255, + "grad_norm": 0.058533716946840286, + "learning_rate": 0.0009160493827160494, + "loss": 6.0168, + "step": 979 + }, + { + "epoch": 2.177173007497917, + "grad_norm": 0.0423620268702507, + "learning_rate": 0.0009135802469135803, + "loss": 5.6981, + "step": 980 + }, + { + "epoch": 2.179394612607609, + "grad_norm": 0.03982330858707428, + "learning_rate": 0.0009111111111111111, + "loss": 6.0324, + "step": 981 + }, + { + "epoch": 2.1816162177173006, + "grad_norm": 0.05875158682465553, + "learning_rate": 0.000908641975308642, + "loss": 5.7238, + "step": 982 + }, + { + "epoch": 2.1838378228269923, + "grad_norm": 0.05447810888290405, + "learning_rate": 0.0009061728395061728, + "loss": 5.9205, + "step": 983 + }, + { + "epoch": 2.186059427936684, + "grad_norm": 0.041009433567523956, + "learning_rate": 0.0009037037037037038, + "loss": 6.1676, + "step": 984 + }, + { + "epoch": 2.1882810330463762, + "grad_norm": 0.0804993212223053, + "learning_rate": 0.0009012345679012346, + "loss": 6.1122, + "step": 985 + }, + { + "epoch": 2.190502638156068, + "grad_norm": 0.04410282149910927, + "learning_rate": 0.0008987654320987655, + "loss": 5.8397, + "step": 986 + }, + { + "epoch": 2.1927242432657597, + "grad_norm": 0.047606438398361206, + "learning_rate": 0.0008962962962962963, + "loss": 6.1118, + "step": 987 + }, + { + "epoch": 2.1949458483754514, + "grad_norm": 0.04506830871105194, + "learning_rate": 0.0008938271604938271, + "loss": 5.7183, + "step": 988 + }, + { + "epoch": 2.197167453485143, + "grad_norm": 0.04143473878502846, + "learning_rate": 0.000891358024691358, + "loss": 5.5866, + "step": 989 + }, + { + "epoch": 2.199389058594835, + "grad_norm": 0.0455172173678875, + "learning_rate": 0.0008888888888888888, + "loss": 5.6809, + "step": 990 + }, + { + "epoch": 2.2016106637045265, + "grad_norm": 0.04900422319769859, + "learning_rate": 0.0008864197530864197, + "loss": 5.7769, + "step": 991 + }, + { + "epoch": 2.2038322688142182, + "grad_norm": 0.05145672708749771, + "learning_rate": 0.0008839506172839507, + "loss": 5.9521, + "step": 992 + }, + { + "epoch": 2.20605387392391, + "grad_norm": 0.0465843640267849, + "learning_rate": 0.0008814814814814816, + "loss": 5.9149, + "step": 993 + }, + { + "epoch": 2.2082754790336017, + "grad_norm": 0.07542437314987183, + "learning_rate": 0.0008790123456790124, + "loss": 6.0509, + "step": 994 + }, + { + "epoch": 
2.2104970841432934, + "grad_norm": 0.05861465260386467, + "learning_rate": 0.0008765432098765433, + "loss": 5.7998, + "step": 995 + }, + { + "epoch": 2.212718689252985, + "grad_norm": 0.05698259547352791, + "learning_rate": 0.0008740740740740741, + "loss": 6.0667, + "step": 996 + }, + { + "epoch": 2.214940294362677, + "grad_norm": 0.054383549839258194, + "learning_rate": 0.0008716049382716049, + "loss": 5.8778, + "step": 997 + }, + { + "epoch": 2.217161899472369, + "grad_norm": 0.09017494320869446, + "learning_rate": 0.0008691358024691358, + "loss": 5.9201, + "step": 998 + }, + { + "epoch": 2.2193835045820607, + "grad_norm": 0.057505007833242416, + "learning_rate": 0.0008666666666666666, + "loss": 5.8489, + "step": 999 + }, + { + "epoch": 2.2216051096917524, + "grad_norm": 0.047291986644268036, + "learning_rate": 0.0008641975308641976, + "loss": 6.0737, + "step": 1000 + }, + { + "epoch": 2.223826714801444, + "grad_norm": 0.0538238063454628, + "learning_rate": 0.0008617283950617284, + "loss": 6.228, + "step": 1001 + }, + { + "epoch": 2.226048319911136, + "grad_norm": 0.04707043245434761, + "learning_rate": 0.0008592592592592593, + "loss": 6.1274, + "step": 1002 + }, + { + "epoch": 2.2282699250208275, + "grad_norm": 0.06756126880645752, + "learning_rate": 0.0008567901234567901, + "loss": 6.0493, + "step": 1003 + }, + { + "epoch": 2.2304915301305193, + "grad_norm": 0.05249028652906418, + "learning_rate": 0.000854320987654321, + "loss": 5.8115, + "step": 1004 + }, + { + "epoch": 2.232713135240211, + "grad_norm": 0.04003097116947174, + "learning_rate": 0.0008518518518518518, + "loss": 5.7495, + "step": 1005 + }, + { + "epoch": 2.2349347403499027, + "grad_norm": 0.04583067446947098, + "learning_rate": 0.0008493827160493826, + "loss": 6.0212, + "step": 1006 + }, + { + "epoch": 2.2371563454595944, + "grad_norm": 0.044572725892066956, + "learning_rate": 0.0008469135802469137, + "loss": 5.9738, + "step": 1007 + }, + { + "epoch": 2.239377950569286, + "grad_norm": 0.046149302273988724, + "learning_rate": 0.0008444444444444445, + "loss": 5.8659, + "step": 1008 + }, + { + "epoch": 2.2415995556789783, + "grad_norm": 0.06371072679758072, + "learning_rate": 0.0008419753086419754, + "loss": 5.7136, + "step": 1009 + }, + { + "epoch": 2.24382116078867, + "grad_norm": 0.06779153645038605, + "learning_rate": 0.0008395061728395062, + "loss": 5.7331, + "step": 1010 + }, + { + "epoch": 2.2460427658983617, + "grad_norm": 0.06367138028144836, + "learning_rate": 0.0008370370370370371, + "loss": 5.6489, + "step": 1011 + }, + { + "epoch": 2.2482643710080534, + "grad_norm": 0.05142154544591904, + "learning_rate": 0.0008345679012345679, + "loss": 5.8864, + "step": 1012 + }, + { + "epoch": 2.250485976117745, + "grad_norm": 0.057787828147411346, + "learning_rate": 0.0008320987654320988, + "loss": 5.9604, + "step": 1013 + }, + { + "epoch": 2.252707581227437, + "grad_norm": 0.0527784489095211, + "learning_rate": 0.0008296296296296296, + "loss": 5.7835, + "step": 1014 + }, + { + "epoch": 2.2549291863371286, + "grad_norm": 0.07328028976917267, + "learning_rate": 0.0008271604938271605, + "loss": 5.7806, + "step": 1015 + }, + { + "epoch": 2.2571507914468203, + "grad_norm": 0.04314279928803444, + "learning_rate": 0.0008246913580246914, + "loss": 6.1362, + "step": 1016 + }, + { + "epoch": 2.259372396556512, + "grad_norm": 0.05321420729160309, + "learning_rate": 0.0008222222222222222, + "loss": 5.876, + "step": 1017 + }, + { + "epoch": 2.2615940016662037, + "grad_norm": 0.051915451884269714, + "learning_rate": 
0.0008197530864197531, + "loss": 5.952, + "step": 1018 + }, + { + "epoch": 2.2638156067758954, + "grad_norm": 0.04124360531568527, + "learning_rate": 0.0008172839506172839, + "loss": 5.9982, + "step": 1019 + }, + { + "epoch": 2.266037211885587, + "grad_norm": 0.05607706680893898, + "learning_rate": 0.0008148148148148148, + "loss": 5.6548, + "step": 1020 + }, + { + "epoch": 2.268258816995279, + "grad_norm": 0.04974445328116417, + "learning_rate": 0.0008123456790123456, + "loss": 5.9214, + "step": 1021 + }, + { + "epoch": 2.270480422104971, + "grad_norm": 0.054538171738386154, + "learning_rate": 0.0008098765432098767, + "loss": 5.8079, + "step": 1022 + }, + { + "epoch": 2.2727020272146627, + "grad_norm": 0.07355673611164093, + "learning_rate": 0.0008074074074074075, + "loss": 5.7347, + "step": 1023 + }, + { + "epoch": 2.2749236323243545, + "grad_norm": 0.04785652458667755, + "learning_rate": 0.0008049382716049384, + "loss": 5.8351, + "step": 1024 + }, + { + "epoch": 2.277145237434046, + "grad_norm": 0.0509219616651535, + "learning_rate": 0.0008024691358024692, + "loss": 5.9801, + "step": 1025 + }, + { + "epoch": 2.279366842543738, + "grad_norm": 0.06415324658155441, + "learning_rate": 0.0008, + "loss": 5.9359, + "step": 1026 + }, + { + "epoch": 2.2815884476534296, + "grad_norm": 0.05211460962891579, + "learning_rate": 0.0007975308641975309, + "loss": 5.9854, + "step": 1027 + }, + { + "epoch": 2.2838100527631213, + "grad_norm": 0.07868483662605286, + "learning_rate": 0.0007950617283950617, + "loss": 6.066, + "step": 1028 + }, + { + "epoch": 2.286031657872813, + "grad_norm": 0.056249335408210754, + "learning_rate": 0.0007925925925925926, + "loss": 5.9595, + "step": 1029 + }, + { + "epoch": 2.2882532629825048, + "grad_norm": 0.034571342170238495, + "learning_rate": 0.0007901234567901235, + "loss": 6.1133, + "step": 1030 + }, + { + "epoch": 2.2904748680921965, + "grad_norm": 0.06290554255247116, + "learning_rate": 0.0007876543209876544, + "loss": 6.0943, + "step": 1031 + }, + { + "epoch": 2.292696473201888, + "grad_norm": 0.05772720277309418, + "learning_rate": 0.0007851851851851852, + "loss": 5.7976, + "step": 1032 + }, + { + "epoch": 2.2949180783115803, + "grad_norm": 0.0646650418639183, + "learning_rate": 0.000782716049382716, + "loss": 6.0501, + "step": 1033 + }, + { + "epoch": 2.297139683421272, + "grad_norm": 0.06781128793954849, + "learning_rate": 0.0007802469135802469, + "loss": 6.1345, + "step": 1034 + }, + { + "epoch": 2.2993612885309638, + "grad_norm": 0.05525821074843407, + "learning_rate": 0.0007777777777777777, + "loss": 5.9788, + "step": 1035 + }, + { + "epoch": 2.3015828936406555, + "grad_norm": 0.0514153353869915, + "learning_rate": 0.0007753086419753086, + "loss": 5.6737, + "step": 1036 + }, + { + "epoch": 2.303804498750347, + "grad_norm": 0.07104574888944626, + "learning_rate": 0.0007728395061728396, + "loss": 5.8728, + "step": 1037 + }, + { + "epoch": 2.306026103860039, + "grad_norm": 0.07069091498851776, + "learning_rate": 0.0007703703703703705, + "loss": 5.9203, + "step": 1038 + }, + { + "epoch": 2.3082477089697306, + "grad_norm": 0.07263226062059402, + "learning_rate": 0.0007679012345679013, + "loss": 5.8229, + "step": 1039 + }, + { + "epoch": 2.3104693140794224, + "grad_norm": 0.04691462963819504, + "learning_rate": 0.0007654320987654322, + "loss": 6.0475, + "step": 1040 + }, + { + "epoch": 2.312690919189114, + "grad_norm": 0.049608971923589706, + "learning_rate": 0.000762962962962963, + "loss": 6.0244, + "step": 1041 + }, + { + "epoch": 2.314912524298806, + "grad_norm": 
0.04370072856545448, + "learning_rate": 0.0007604938271604939, + "loss": 6.0077, + "step": 1042 + }, + { + "epoch": 2.3171341294084975, + "grad_norm": 0.07816038280725479, + "learning_rate": 0.0007580246913580247, + "loss": 5.7582, + "step": 1043 + }, + { + "epoch": 2.319355734518189, + "grad_norm": 0.04159904271364212, + "learning_rate": 0.0007555555555555555, + "loss": 5.9899, + "step": 1044 + }, + { + "epoch": 2.321577339627881, + "grad_norm": 0.06587567180395126, + "learning_rate": 0.0007530864197530865, + "loss": 6.055, + "step": 1045 + }, + { + "epoch": 2.3237989447375726, + "grad_norm": 0.049506593495607376, + "learning_rate": 0.0007506172839506173, + "loss": 6.0308, + "step": 1046 + }, + { + "epoch": 2.326020549847265, + "grad_norm": 0.05315249413251877, + "learning_rate": 0.0007481481481481482, + "loss": 5.6239, + "step": 1047 + }, + { + "epoch": 2.3282421549569565, + "grad_norm": 0.0802527591586113, + "learning_rate": 0.000745679012345679, + "loss": 5.8756, + "step": 1048 + }, + { + "epoch": 2.3304637600666482, + "grad_norm": 0.05673157796263695, + "learning_rate": 0.0007432098765432099, + "loss": 5.8657, + "step": 1049 + }, + { + "epoch": 2.33268536517634, + "grad_norm": 0.03893468156456947, + "learning_rate": 0.0007407407407407407, + "loss": 5.9952, + "step": 1050 + }, + { + "epoch": 2.3349069702860317, + "grad_norm": 0.0511779710650444, + "learning_rate": 0.0007382716049382717, + "loss": 5.493, + "step": 1051 + }, + { + "epoch": 2.3371285753957234, + "grad_norm": 0.038521114736795425, + "learning_rate": 0.0007358024691358025, + "loss": 5.7782, + "step": 1052 + }, + { + "epoch": 2.339350180505415, + "grad_norm": 0.08118283748626709, + "learning_rate": 0.0007333333333333333, + "loss": 5.9379, + "step": 1053 + }, + { + "epoch": 2.341571785615107, + "grad_norm": 0.04421069100499153, + "learning_rate": 0.0007308641975308643, + "loss": 5.9435, + "step": 1054 + }, + { + "epoch": 2.3437933907247985, + "grad_norm": 0.051811590790748596, + "learning_rate": 0.0007283950617283951, + "loss": 6.0192, + "step": 1055 + }, + { + "epoch": 2.3460149958344902, + "grad_norm": 0.0796356350183487, + "learning_rate": 0.000725925925925926, + "loss": 5.628, + "step": 1056 + }, + { + "epoch": 2.3482366009441824, + "grad_norm": 0.06779766827821732, + "learning_rate": 0.0007234567901234568, + "loss": 5.8107, + "step": 1057 + }, + { + "epoch": 2.350458206053874, + "grad_norm": 0.07066726684570312, + "learning_rate": 0.0007209876543209877, + "loss": 5.8353, + "step": 1058 + }, + { + "epoch": 2.352679811163566, + "grad_norm": 0.06709866970777512, + "learning_rate": 0.0007185185185185185, + "loss": 5.8774, + "step": 1059 + }, + { + "epoch": 2.3549014162732576, + "grad_norm": 0.061553098261356354, + "learning_rate": 0.0007160493827160494, + "loss": 5.5262, + "step": 1060 + }, + { + "epoch": 2.3571230213829493, + "grad_norm": 0.04372899979352951, + "learning_rate": 0.0007135802469135802, + "loss": 5.9394, + "step": 1061 + }, + { + "epoch": 2.359344626492641, + "grad_norm": 0.04984388127923012, + "learning_rate": 0.0007111111111111111, + "loss": 6.1919, + "step": 1062 + }, + { + "epoch": 2.3615662316023327, + "grad_norm": 0.05325157940387726, + "learning_rate": 0.000708641975308642, + "loss": 5.8512, + "step": 1063 + }, + { + "epoch": 2.3637878367120244, + "grad_norm": 0.06130823865532875, + "learning_rate": 0.0007061728395061728, + "loss": 5.8554, + "step": 1064 + }, + { + "epoch": 2.366009441821716, + "grad_norm": 0.043308887630701065, + "learning_rate": 0.0007037037037037037, + "loss": 5.8488, + "step": 1065 
+ }, + { + "epoch": 2.368231046931408, + "grad_norm": 0.0811159536242485, + "learning_rate": 0.0007012345679012346, + "loss": 5.781, + "step": 1066 + }, + { + "epoch": 2.3704526520410996, + "grad_norm": 0.056953951716423035, + "learning_rate": 0.0006987654320987655, + "loss": 5.7746, + "step": 1067 + }, + { + "epoch": 2.3726742571507913, + "grad_norm": 0.06400550156831741, + "learning_rate": 0.0006962962962962963, + "loss": 5.9784, + "step": 1068 + }, + { + "epoch": 2.374895862260483, + "grad_norm": 0.07541897892951965, + "learning_rate": 0.0006938271604938273, + "loss": 5.804, + "step": 1069 + }, + { + "epoch": 2.3771174673701747, + "grad_norm": 0.05397675186395645, + "learning_rate": 0.0006913580246913581, + "loss": 6.0465, + "step": 1070 + }, + { + "epoch": 2.379339072479867, + "grad_norm": 0.06133056432008743, + "learning_rate": 0.000688888888888889, + "loss": 5.7814, + "step": 1071 + }, + { + "epoch": 2.3815606775895586, + "grad_norm": 0.04709472507238388, + "learning_rate": 0.0006864197530864198, + "loss": 5.9838, + "step": 1072 + }, + { + "epoch": 2.3837822826992503, + "grad_norm": 0.07361142337322235, + "learning_rate": 0.0006839506172839506, + "loss": 5.9829, + "step": 1073 + }, + { + "epoch": 2.386003887808942, + "grad_norm": 0.05288068205118179, + "learning_rate": 0.0006814814814814815, + "loss": 5.4474, + "step": 1074 + }, + { + "epoch": 2.3882254929186337, + "grad_norm": 0.060563940554857254, + "learning_rate": 0.0006790123456790123, + "loss": 6.3124, + "step": 1075 + }, + { + "epoch": 2.3904470980283254, + "grad_norm": 0.04115691035985947, + "learning_rate": 0.0006765432098765432, + "loss": 5.8899, + "step": 1076 + }, + { + "epoch": 2.392668703138017, + "grad_norm": 0.07105467468500137, + "learning_rate": 0.0006740740740740741, + "loss": 5.896, + "step": 1077 + }, + { + "epoch": 2.394890308247709, + "grad_norm": 0.06776075065135956, + "learning_rate": 0.000671604938271605, + "loss": 5.9926, + "step": 1078 + }, + { + "epoch": 2.3971119133574006, + "grad_norm": 0.04716327413916588, + "learning_rate": 0.0006691358024691358, + "loss": 5.8439, + "step": 1079 + }, + { + "epoch": 2.3993335184670923, + "grad_norm": 0.04947780445218086, + "learning_rate": 0.0006666666666666666, + "loss": 5.8964, + "step": 1080 + }, + { + "epoch": 2.4015551235767845, + "grad_norm": 0.08443167805671692, + "learning_rate": 0.0006641975308641976, + "loss": 5.9658, + "step": 1081 + }, + { + "epoch": 2.403776728686476, + "grad_norm": 0.05708467215299606, + "learning_rate": 0.0006617283950617284, + "loss": 5.5061, + "step": 1082 + }, + { + "epoch": 2.405998333796168, + "grad_norm": 0.08224623650312424, + "learning_rate": 0.0006592592592592593, + "loss": 5.6903, + "step": 1083 + }, + { + "epoch": 2.4082199389058596, + "grad_norm": 0.059096187353134155, + "learning_rate": 0.0006567901234567901, + "loss": 5.8495, + "step": 1084 + }, + { + "epoch": 2.4104415440155513, + "grad_norm": 0.08303750306367874, + "learning_rate": 0.000654320987654321, + "loss": 5.9612, + "step": 1085 + }, + { + "epoch": 2.412663149125243, + "grad_norm": 0.06013647839426994, + "learning_rate": 0.0006518518518518518, + "loss": 5.8311, + "step": 1086 + }, + { + "epoch": 2.4148847542349348, + "grad_norm": 0.06758897751569748, + "learning_rate": 0.0006493827160493827, + "loss": 6.0291, + "step": 1087 + }, + { + "epoch": 2.4171063593446265, + "grad_norm": 0.06581225246191025, + "learning_rate": 0.0006469135802469136, + "loss": 6.1888, + "step": 1088 + }, + { + "epoch": 2.419327964454318, + "grad_norm": 0.04392261058092117, + "learning_rate": 
0.0006444444444444444, + "loss": 5.7335, + "step": 1089 + }, + { + "epoch": 2.42154956956401, + "grad_norm": 0.05450277030467987, + "learning_rate": 0.0006419753086419753, + "loss": 5.7078, + "step": 1090 + }, + { + "epoch": 2.4237711746737016, + "grad_norm": 0.054721567779779434, + "learning_rate": 0.0006395061728395061, + "loss": 6.0187, + "step": 1091 + }, + { + "epoch": 2.4259927797833933, + "grad_norm": 0.06465310603380203, + "learning_rate": 0.0006370370370370371, + "loss": 6.0899, + "step": 1092 + }, + { + "epoch": 2.428214384893085, + "grad_norm": 0.04651114344596863, + "learning_rate": 0.0006345679012345679, + "loss": 5.844, + "step": 1093 + }, + { + "epoch": 2.4304359900027768, + "grad_norm": 0.07651801407337189, + "learning_rate": 0.0006320987654320988, + "loss": 5.6907, + "step": 1094 + }, + { + "epoch": 2.432657595112469, + "grad_norm": 0.04985710605978966, + "learning_rate": 0.0006296296296296296, + "loss": 5.9672, + "step": 1095 + }, + { + "epoch": 2.4348792002221606, + "grad_norm": 0.05078880116343498, + "learning_rate": 0.0006271604938271606, + "loss": 5.8864, + "step": 1096 + }, + { + "epoch": 2.4371008053318524, + "grad_norm": 0.05990299582481384, + "learning_rate": 0.0006246913580246914, + "loss": 6.0482, + "step": 1097 + }, + { + "epoch": 2.439322410441544, + "grad_norm": 0.06744808703660965, + "learning_rate": 0.0006222222222222223, + "loss": 5.7663, + "step": 1098 + }, + { + "epoch": 2.441544015551236, + "grad_norm": 0.06793643534183502, + "learning_rate": 0.0006197530864197531, + "loss": 6.1515, + "step": 1099 + }, + { + "epoch": 2.4437656206609275, + "grad_norm": 0.054759904742240906, + "learning_rate": 0.0006172839506172839, + "loss": 5.8285, + "step": 1100 + }, + { + "epoch": 2.4459872257706192, + "grad_norm": 0.052103910595178604, + "learning_rate": 0.0006148148148148148, + "loss": 5.8897, + "step": 1101 + }, + { + "epoch": 2.448208830880311, + "grad_norm": 0.05217638239264488, + "learning_rate": 0.0006123456790123456, + "loss": 6.143, + "step": 1102 + }, + { + "epoch": 2.4504304359900027, + "grad_norm": 0.07950019836425781, + "learning_rate": 0.0006098765432098766, + "loss": 5.947, + "step": 1103 + }, + { + "epoch": 2.4526520410996944, + "grad_norm": 0.04472443833947182, + "learning_rate": 0.0006074074074074074, + "loss": 5.876, + "step": 1104 + }, + { + "epoch": 2.4548736462093865, + "grad_norm": 0.04999097064137459, + "learning_rate": 0.0006049382716049383, + "loss": 5.8068, + "step": 1105 + }, + { + "epoch": 2.4570952513190782, + "grad_norm": 0.043887995183467865, + "learning_rate": 0.0006024691358024691, + "loss": 5.6662, + "step": 1106 + }, + { + "epoch": 2.45931685642877, + "grad_norm": 0.053057532757520676, + "learning_rate": 0.0006000000000000001, + "loss": 5.7395, + "step": 1107 + }, + { + "epoch": 2.4615384615384617, + "grad_norm": 0.09846268594264984, + "learning_rate": 0.0005975308641975309, + "loss": 6.1488, + "step": 1108 + }, + { + "epoch": 2.4637600666481534, + "grad_norm": 0.058038853108882904, + "learning_rate": 0.0005950617283950617, + "loss": 5.6383, + "step": 1109 + }, + { + "epoch": 2.465981671757845, + "grad_norm": 0.038222573697566986, + "learning_rate": 0.0005925925925925926, + "loss": 5.9628, + "step": 1110 + }, + { + "epoch": 2.468203276867537, + "grad_norm": 0.06453481316566467, + "learning_rate": 0.0005901234567901235, + "loss": 5.7804, + "step": 1111 + }, + { + "epoch": 2.4704248819772285, + "grad_norm": 0.05904708802700043, + "learning_rate": 0.0005876543209876544, + "loss": 5.9227, + "step": 1112 + }, + { + "epoch": 
2.4726464870869203, + "grad_norm": 0.04702883958816528, + "learning_rate": 0.0005851851851851852, + "loss": 5.8564, + "step": 1113 + }, + { + "epoch": 2.474868092196612, + "grad_norm": 0.07017482072114944, + "learning_rate": 0.0005827160493827161, + "loss": 5.9593, + "step": 1114 + }, + { + "epoch": 2.4770896973063037, + "grad_norm": 0.05562103912234306, + "learning_rate": 0.0005802469135802469, + "loss": 5.6843, + "step": 1115 + }, + { + "epoch": 2.4793113024159954, + "grad_norm": 0.0637110024690628, + "learning_rate": 0.0005777777777777778, + "loss": 5.7582, + "step": 1116 + }, + { + "epoch": 2.481532907525687, + "grad_norm": 0.05489303171634674, + "learning_rate": 0.0005753086419753086, + "loss": 5.6139, + "step": 1117 + }, + { + "epoch": 2.483754512635379, + "grad_norm": 0.05454476550221443, + "learning_rate": 0.0005728395061728395, + "loss": 5.7242, + "step": 1118 + }, + { + "epoch": 2.485976117745071, + "grad_norm": 0.04765939712524414, + "learning_rate": 0.0005703703703703704, + "loss": 6.0174, + "step": 1119 + }, + { + "epoch": 2.4881977228547627, + "grad_norm": 0.04585859924554825, + "learning_rate": 0.0005679012345679012, + "loss": 5.9768, + "step": 1120 + }, + { + "epoch": 2.4904193279644544, + "grad_norm": 0.05365516245365143, + "learning_rate": 0.0005654320987654321, + "loss": 6.0696, + "step": 1121 + }, + { + "epoch": 2.492640933074146, + "grad_norm": 0.04974067583680153, + "learning_rate": 0.000562962962962963, + "loss": 5.9656, + "step": 1122 + }, + { + "epoch": 2.494862538183838, + "grad_norm": 0.056851621717214584, + "learning_rate": 0.0005604938271604939, + "loss": 6.3004, + "step": 1123 + }, + { + "epoch": 2.4970841432935296, + "grad_norm": 0.055615637451410294, + "learning_rate": 0.0005580246913580247, + "loss": 5.9742, + "step": 1124 + }, + { + "epoch": 2.4993057484032213, + "grad_norm": 0.05103308707475662, + "learning_rate": 0.0005555555555555556, + "loss": 5.7587, + "step": 1125 + }, + { + "epoch": 2.501527353512913, + "grad_norm": 0.052293259650468826, + "learning_rate": 0.0005530864197530865, + "loss": 5.8624, + "step": 1126 + }, + { + "epoch": 2.5037489586226047, + "grad_norm": 0.05106515437364578, + "learning_rate": 0.0005506172839506173, + "loss": 5.8215, + "step": 1127 + }, + { + "epoch": 2.5059705637322964, + "grad_norm": 0.05350141227245331, + "learning_rate": 0.0005481481481481482, + "loss": 5.491, + "step": 1128 + }, + { + "epoch": 2.5081921688419886, + "grad_norm": 0.048063699156045914, + "learning_rate": 0.000545679012345679, + "loss": 5.618, + "step": 1129 + }, + { + "epoch": 2.5104137739516803, + "grad_norm": 0.041624557226896286, + "learning_rate": 0.0005432098765432099, + "loss": 5.872, + "step": 1130 + }, + { + "epoch": 2.512635379061372, + "grad_norm": 0.05397861450910568, + "learning_rate": 0.0005407407407407407, + "loss": 5.5568, + "step": 1131 + }, + { + "epoch": 2.5148569841710637, + "grad_norm": 0.04723358154296875, + "learning_rate": 0.0005382716049382716, + "loss": 5.7521, + "step": 1132 + }, + { + "epoch": 2.5170785892807555, + "grad_norm": 0.04932169243693352, + "learning_rate": 0.0005358024691358024, + "loss": 5.6926, + "step": 1133 + }, + { + "epoch": 2.519300194390447, + "grad_norm": 0.049961794167757034, + "learning_rate": 0.0005333333333333334, + "loss": 5.8031, + "step": 1134 + }, + { + "epoch": 2.521521799500139, + "grad_norm": 0.06546290963888168, + "learning_rate": 0.0005308641975308642, + "loss": 5.948, + "step": 1135 + }, + { + "epoch": 2.5237434046098306, + "grad_norm": 0.05172509700059891, + "learning_rate": 
0.000528395061728395, + "loss": 5.9811, + "step": 1136 + }, + { + "epoch": 2.5259650097195223, + "grad_norm": 0.05558503046631813, + "learning_rate": 0.000525925925925926, + "loss": 5.5325, + "step": 1137 + }, + { + "epoch": 2.528186614829214, + "grad_norm": 0.04770063981413841, + "learning_rate": 0.0005234567901234568, + "loss": 6.0555, + "step": 1138 + }, + { + "epoch": 2.5304082199389057, + "grad_norm": 0.05725065991282463, + "learning_rate": 0.0005209876543209877, + "loss": 6.4155, + "step": 1139 + }, + { + "epoch": 2.5326298250485975, + "grad_norm": 0.056243401020765305, + "learning_rate": 0.0005185185185185185, + "loss": 5.6629, + "step": 1140 + }, + { + "epoch": 2.534851430158289, + "grad_norm": 0.05091274157166481, + "learning_rate": 0.0005160493827160495, + "loss": 6.0942, + "step": 1141 + }, + { + "epoch": 2.537073035267981, + "grad_norm": 0.08415589481592178, + "learning_rate": 0.0005135802469135803, + "loss": 6.0429, + "step": 1142 + }, + { + "epoch": 2.5392946403776726, + "grad_norm": 0.05046810582280159, + "learning_rate": 0.0005111111111111112, + "loss": 6.0881, + "step": 1143 + }, + { + "epoch": 2.5415162454873648, + "grad_norm": 0.05902523174881935, + "learning_rate": 0.000508641975308642, + "loss": 5.7132, + "step": 1144 + }, + { + "epoch": 2.5437378505970565, + "grad_norm": 0.057459522038698196, + "learning_rate": 0.0005061728395061728, + "loss": 5.9, + "step": 1145 + }, + { + "epoch": 2.545959455706748, + "grad_norm": 0.052608080208301544, + "learning_rate": 0.0005037037037037037, + "loss": 6.0521, + "step": 1146 + }, + { + "epoch": 2.54818106081644, + "grad_norm": 0.03979867696762085, + "learning_rate": 0.0005012345679012345, + "loss": 5.5871, + "step": 1147 + }, + { + "epoch": 2.5504026659261316, + "grad_norm": 0.04679068177938461, + "learning_rate": 0.0004987654320987654, + "loss": 5.9721, + "step": 1148 + }, + { + "epoch": 2.5526242710358233, + "grad_norm": 0.07402826845645905, + "learning_rate": 0.0004962962962962963, + "loss": 5.858, + "step": 1149 + }, + { + "epoch": 2.554845876145515, + "grad_norm": 0.054051779210567474, + "learning_rate": 0.0004938271604938272, + "loss": 5.8653, + "step": 1150 + }, + { + "epoch": 2.5570674812552068, + "grad_norm": 0.06473316252231598, + "learning_rate": 0.000491358024691358, + "loss": 5.7465, + "step": 1151 + }, + { + "epoch": 2.5592890863648985, + "grad_norm": 0.04736412316560745, + "learning_rate": 0.000488888888888889, + "loss": 5.8293, + "step": 1152 + }, + { + "epoch": 2.5615106914745907, + "grad_norm": 0.04081957787275314, + "learning_rate": 0.00048641975308641976, + "loss": 5.8449, + "step": 1153 + }, + { + "epoch": 2.5637322965842824, + "grad_norm": 0.04959014803171158, + "learning_rate": 0.0004839506172839506, + "loss": 5.729, + "step": 1154 + }, + { + "epoch": 2.565953901693974, + "grad_norm": 0.09538524597883224, + "learning_rate": 0.00048148148148148144, + "loss": 5.7509, + "step": 1155 + }, + { + "epoch": 2.568175506803666, + "grad_norm": 0.05531400814652443, + "learning_rate": 0.0004790123456790124, + "loss": 5.8802, + "step": 1156 + }, + { + "epoch": 2.5703971119133575, + "grad_norm": 0.06068155914545059, + "learning_rate": 0.00047654320987654324, + "loss": 6.134, + "step": 1157 + }, + { + "epoch": 2.5726187170230492, + "grad_norm": 0.05842263251543045, + "learning_rate": 0.0004740740740740741, + "loss": 5.8351, + "step": 1158 + }, + { + "epoch": 2.574840322132741, + "grad_norm": 0.06507426500320435, + "learning_rate": 0.0004716049382716049, + "loss": 5.5752, + "step": 1159 + }, + { + "epoch": 2.5770619272424327, 
+ "grad_norm": 0.04701032117009163, + "learning_rate": 0.0004691358024691358, + "loss": 6.0044, + "step": 1160 + }, + { + "epoch": 2.5792835323521244, + "grad_norm": 0.04070613533258438, + "learning_rate": 0.00046666666666666666, + "loss": 6.0465, + "step": 1161 + }, + { + "epoch": 2.581505137461816, + "grad_norm": 0.04922967404127121, + "learning_rate": 0.0004641975308641975, + "loss": 5.9557, + "step": 1162 + }, + { + "epoch": 2.583726742571508, + "grad_norm": 0.055332690477371216, + "learning_rate": 0.00046172839506172835, + "loss": 5.9638, + "step": 1163 + }, + { + "epoch": 2.5859483476811995, + "grad_norm": 0.054886020720005035, + "learning_rate": 0.0004592592592592593, + "loss": 6.1071, + "step": 1164 + }, + { + "epoch": 2.5881699527908912, + "grad_norm": 0.0459747239947319, + "learning_rate": 0.00045679012345679014, + "loss": 5.9422, + "step": 1165 + }, + { + "epoch": 2.590391557900583, + "grad_norm": 0.0674658864736557, + "learning_rate": 0.000454320987654321, + "loss": 5.9015, + "step": 1166 + }, + { + "epoch": 2.5926131630102747, + "grad_norm": 0.06457730382680893, + "learning_rate": 0.0004518518518518519, + "loss": 6.177, + "step": 1167 + }, + { + "epoch": 2.594834768119967, + "grad_norm": 0.05291493982076645, + "learning_rate": 0.0004493827160493827, + "loss": 6.1573, + "step": 1168 + }, + { + "epoch": 2.5970563732296585, + "grad_norm": 0.0366109199821949, + "learning_rate": 0.00044691358024691357, + "loss": 5.9545, + "step": 1169 + }, + { + "epoch": 2.5992779783393503, + "grad_norm": 0.07245033979415894, + "learning_rate": 0.0004444444444444444, + "loss": 5.7638, + "step": 1170 + }, + { + "epoch": 2.601499583449042, + "grad_norm": 0.06531214714050293, + "learning_rate": 0.00044197530864197536, + "loss": 5.6332, + "step": 1171 + }, + { + "epoch": 2.6037211885587337, + "grad_norm": 0.05225894972681999, + "learning_rate": 0.0004395061728395062, + "loss": 6.0085, + "step": 1172 + }, + { + "epoch": 2.6059427936684254, + "grad_norm": 0.0548102930188179, + "learning_rate": 0.00043703703703703705, + "loss": 5.6063, + "step": 1173 + }, + { + "epoch": 2.608164398778117, + "grad_norm": 0.05640947446227074, + "learning_rate": 0.0004345679012345679, + "loss": 5.7361, + "step": 1174 + }, + { + "epoch": 2.610386003887809, + "grad_norm": 0.06905799359083176, + "learning_rate": 0.0004320987654320988, + "loss": 5.7613, + "step": 1175 + }, + { + "epoch": 2.6126076089975006, + "grad_norm": 0.04869680106639862, + "learning_rate": 0.00042962962962962963, + "loss": 5.8906, + "step": 1176 + }, + { + "epoch": 2.6148292141071927, + "grad_norm": 0.04864457622170448, + "learning_rate": 0.0004271604938271605, + "loss": 5.3902, + "step": 1177 + }, + { + "epoch": 2.6170508192168844, + "grad_norm": 0.05964788794517517, + "learning_rate": 0.0004246913580246913, + "loss": 5.5457, + "step": 1178 + }, + { + "epoch": 2.619272424326576, + "grad_norm": 0.04340321198105812, + "learning_rate": 0.00042222222222222227, + "loss": 5.9072, + "step": 1179 + }, + { + "epoch": 2.621494029436268, + "grad_norm": 0.04552336782217026, + "learning_rate": 0.0004197530864197531, + "loss": 6.0247, + "step": 1180 + }, + { + "epoch": 2.6237156345459596, + "grad_norm": 0.04501795768737793, + "learning_rate": 0.00041728395061728396, + "loss": 5.7657, + "step": 1181 + }, + { + "epoch": 2.6259372396556513, + "grad_norm": 0.09421583265066147, + "learning_rate": 0.0004148148148148148, + "loss": 6.128, + "step": 1182 + }, + { + "epoch": 2.628158844765343, + "grad_norm": 0.04200819879770279, + "learning_rate": 0.0004123456790123457, + "loss": 
6.1594, + "step": 1183 + }, + { + "epoch": 2.6303804498750347, + "grad_norm": 0.05063186213374138, + "learning_rate": 0.00040987654320987654, + "loss": 5.5544, + "step": 1184 + }, + { + "epoch": 2.6326020549847264, + "grad_norm": 0.04634084552526474, + "learning_rate": 0.0004074074074074074, + "loss": 5.7918, + "step": 1185 + }, + { + "epoch": 2.634823660094418, + "grad_norm": 0.05453971028327942, + "learning_rate": 0.00040493827160493833, + "loss": 5.9909, + "step": 1186 + }, + { + "epoch": 2.63704526520411, + "grad_norm": 0.0714196115732193, + "learning_rate": 0.0004024691358024692, + "loss": 5.9813, + "step": 1187 + }, + { + "epoch": 2.6392668703138016, + "grad_norm": 0.04862188175320625, + "learning_rate": 0.0004, + "loss": 5.8212, + "step": 1188 + }, + { + "epoch": 2.6414884754234933, + "grad_norm": 0.1108190268278122, + "learning_rate": 0.00039753086419753086, + "loss": 5.4844, + "step": 1189 + }, + { + "epoch": 2.643710080533185, + "grad_norm": 0.05084674805402756, + "learning_rate": 0.00039506172839506176, + "loss": 5.8944, + "step": 1190 + }, + { + "epoch": 2.6459316856428767, + "grad_norm": 0.04934384301304817, + "learning_rate": 0.0003925925925925926, + "loss": 6.0849, + "step": 1191 + }, + { + "epoch": 2.648153290752569, + "grad_norm": 0.06427028030157089, + "learning_rate": 0.00039012345679012345, + "loss": 6.0675, + "step": 1192 + }, + { + "epoch": 2.6503748958622606, + "grad_norm": 0.05401918292045593, + "learning_rate": 0.0003876543209876543, + "loss": 6.1425, + "step": 1193 + }, + { + "epoch": 2.6525965009719523, + "grad_norm": 0.08267608284950256, + "learning_rate": 0.00038518518518518524, + "loss": 5.8855, + "step": 1194 + }, + { + "epoch": 2.654818106081644, + "grad_norm": 0.05584852769970894, + "learning_rate": 0.0003827160493827161, + "loss": 5.8688, + "step": 1195 + }, + { + "epoch": 2.6570397111913358, + "grad_norm": 0.06895650923252106, + "learning_rate": 0.0003802469135802469, + "loss": 6.1148, + "step": 1196 + }, + { + "epoch": 2.6592613163010275, + "grad_norm": 0.08543485403060913, + "learning_rate": 0.00037777777777777777, + "loss": 5.877, + "step": 1197 + }, + { + "epoch": 2.661482921410719, + "grad_norm": 0.08173643052577972, + "learning_rate": 0.00037530864197530867, + "loss": 5.9988, + "step": 1198 + }, + { + "epoch": 2.663704526520411, + "grad_norm": 0.05837205797433853, + "learning_rate": 0.0003728395061728395, + "loss": 5.8937, + "step": 1199 + }, + { + "epoch": 2.6659261316301026, + "grad_norm": 0.04911995306611061, + "learning_rate": 0.00037037037037037035, + "loss": 5.7209, + "step": 1200 + }, + { + "epoch": 2.6681477367397948, + "grad_norm": 0.06738217920064926, + "learning_rate": 0.00036790123456790125, + "loss": 6.1993, + "step": 1201 + }, + { + "epoch": 2.6703693418494865, + "grad_norm": 0.07234985381364822, + "learning_rate": 0.00036543209876543215, + "loss": 5.493, + "step": 1202 + }, + { + "epoch": 2.672590946959178, + "grad_norm": 0.07034603506326675, + "learning_rate": 0.000362962962962963, + "loss": 5.4333, + "step": 1203 + }, + { + "epoch": 2.67481255206887, + "grad_norm": 0.04224281385540962, + "learning_rate": 0.00036049382716049383, + "loss": 6.0671, + "step": 1204 + }, + { + "epoch": 2.6770341571785616, + "grad_norm": 0.06850730627775192, + "learning_rate": 0.0003580246913580247, + "loss": 5.9359, + "step": 1205 + }, + { + "epoch": 2.6792557622882534, + "grad_norm": 0.06015927717089653, + "learning_rate": 0.00035555555555555557, + "loss": 5.7879, + "step": 1206 + }, + { + "epoch": 2.681477367397945, + "grad_norm": 0.06330190598964691, + 
"learning_rate": 0.0003530864197530864, + "loss": 5.9427, + "step": 1207 + }, + { + "epoch": 2.683698972507637, + "grad_norm": 0.06587096303701401, + "learning_rate": 0.0003506172839506173, + "loss": 5.8831, + "step": 1208 + }, + { + "epoch": 2.6859205776173285, + "grad_norm": 0.04329574853181839, + "learning_rate": 0.00034814814814814816, + "loss": 5.8498, + "step": 1209 + }, + { + "epoch": 2.68814218272702, + "grad_norm": 0.06754133105278015, + "learning_rate": 0.00034567901234567905, + "loss": 6.0184, + "step": 1210 + }, + { + "epoch": 2.690363787836712, + "grad_norm": 0.06105200573801994, + "learning_rate": 0.0003432098765432099, + "loss": 5.7446, + "step": 1211 + }, + { + "epoch": 2.6925853929464036, + "grad_norm": 0.06230635941028595, + "learning_rate": 0.00034074074074074074, + "loss": 6.0185, + "step": 1212 + }, + { + "epoch": 2.6948069980560954, + "grad_norm": 0.08427722752094269, + "learning_rate": 0.0003382716049382716, + "loss": 5.7514, + "step": 1213 + }, + { + "epoch": 2.697028603165787, + "grad_norm": 0.05446156859397888, + "learning_rate": 0.0003358024691358025, + "loss": 5.9268, + "step": 1214 + }, + { + "epoch": 2.699250208275479, + "grad_norm": 0.07585136592388153, + "learning_rate": 0.0003333333333333333, + "loss": 6.0322, + "step": 1215 + }, + { + "epoch": 2.701471813385171, + "grad_norm": 0.05803457275032997, + "learning_rate": 0.0003308641975308642, + "loss": 5.8755, + "step": 1216 + }, + { + "epoch": 2.7036934184948627, + "grad_norm": 0.06280191987752914, + "learning_rate": 0.00032839506172839506, + "loss": 5.6669, + "step": 1217 + }, + { + "epoch": 2.7059150236045544, + "grad_norm": 0.05328867584466934, + "learning_rate": 0.0003259259259259259, + "loss": 5.7171, + "step": 1218 + }, + { + "epoch": 2.708136628714246, + "grad_norm": 0.06539945304393768, + "learning_rate": 0.0003234567901234568, + "loss": 6.1191, + "step": 1219 + }, + { + "epoch": 2.710358233823938, + "grad_norm": 0.06002845987677574, + "learning_rate": 0.00032098765432098765, + "loss": 5.8588, + "step": 1220 + }, + { + "epoch": 2.7125798389336295, + "grad_norm": 0.055796269327402115, + "learning_rate": 0.00031851851851851854, + "loss": 5.7848, + "step": 1221 + }, + { + "epoch": 2.7148014440433212, + "grad_norm": 0.055544886738061905, + "learning_rate": 0.0003160493827160494, + "loss": 5.5401, + "step": 1222 + }, + { + "epoch": 2.717023049153013, + "grad_norm": 0.05194031074643135, + "learning_rate": 0.0003135802469135803, + "loss": 5.7283, + "step": 1223 + }, + { + "epoch": 2.7192446542627047, + "grad_norm": 0.057613443583250046, + "learning_rate": 0.0003111111111111111, + "loss": 5.8282, + "step": 1224 + }, + { + "epoch": 2.7214662593723964, + "grad_norm": 0.060236454010009766, + "learning_rate": 0.00030864197530864197, + "loss": 5.8772, + "step": 1225 + }, + { + "epoch": 2.7236878644820885, + "grad_norm": 0.0555168054997921, + "learning_rate": 0.0003061728395061728, + "loss": 5.9074, + "step": 1226 + }, + { + "epoch": 2.7259094695917803, + "grad_norm": 0.04928433895111084, + "learning_rate": 0.0003037037037037037, + "loss": 6.0044, + "step": 1227 + }, + { + "epoch": 2.728131074701472, + "grad_norm": 0.07700496912002563, + "learning_rate": 0.00030123456790123455, + "loss": 6.0433, + "step": 1228 + }, + { + "epoch": 2.7303526798111637, + "grad_norm": 0.04453560337424278, + "learning_rate": 0.00029876543209876545, + "loss": 5.7116, + "step": 1229 + }, + { + "epoch": 2.7325742849208554, + "grad_norm": 0.06810157746076584, + "learning_rate": 0.0002962962962962963, + "loss": 5.4879, + "step": 1230 + }, + 
{ + "epoch": 2.734795890030547, + "grad_norm": 0.059812840074300766, + "learning_rate": 0.0002938271604938272, + "loss": 5.8898, + "step": 1231 + }, + { + "epoch": 2.737017495140239, + "grad_norm": 0.06308246403932571, + "learning_rate": 0.00029135802469135803, + "loss": 5.6131, + "step": 1232 + }, + { + "epoch": 2.7392391002499306, + "grad_norm": 0.04889903962612152, + "learning_rate": 0.0002888888888888889, + "loss": 5.5685, + "step": 1233 + }, + { + "epoch": 2.7414607053596223, + "grad_norm": 0.05145961046218872, + "learning_rate": 0.00028641975308641977, + "loss": 5.9801, + "step": 1234 + }, + { + "epoch": 2.743682310469314, + "grad_norm": 0.0958176925778389, + "learning_rate": 0.0002839506172839506, + "loss": 5.6457, + "step": 1235 + }, + { + "epoch": 2.7459039155790057, + "grad_norm": 0.049419160932302475, + "learning_rate": 0.0002814814814814815, + "loss": 5.8724, + "step": 1236 + }, + { + "epoch": 2.7481255206886974, + "grad_norm": 0.07048829644918442, + "learning_rate": 0.00027901234567901236, + "loss": 5.9755, + "step": 1237 + }, + { + "epoch": 2.750347125798389, + "grad_norm": 0.048005539923906326, + "learning_rate": 0.00027654320987654325, + "loss": 5.6778, + "step": 1238 + }, + { + "epoch": 2.752568730908081, + "grad_norm": 0.05804963409900665, + "learning_rate": 0.0002740740740740741, + "loss": 5.8681, + "step": 1239 + }, + { + "epoch": 2.7547903360177726, + "grad_norm": 0.05638793855905533, + "learning_rate": 0.00027160493827160494, + "loss": 6.0751, + "step": 1240 + }, + { + "epoch": 2.7570119411274647, + "grad_norm": 0.047091707587242126, + "learning_rate": 0.0002691358024691358, + "loss": 5.8273, + "step": 1241 + }, + { + "epoch": 2.7592335462371564, + "grad_norm": 0.07317803055047989, + "learning_rate": 0.0002666666666666667, + "loss": 5.8787, + "step": 1242 + }, + { + "epoch": 2.761455151346848, + "grad_norm": 0.05119254067540169, + "learning_rate": 0.0002641975308641975, + "loss": 5.8983, + "step": 1243 + }, + { + "epoch": 2.76367675645654, + "grad_norm": 0.05338507890701294, + "learning_rate": 0.0002617283950617284, + "loss": 5.8812, + "step": 1244 + }, + { + "epoch": 2.7658983615662316, + "grad_norm": 0.04814758151769638, + "learning_rate": 0.00025925925925925926, + "loss": 5.9619, + "step": 1245 + }, + { + "epoch": 2.7681199666759233, + "grad_norm": 0.06921735405921936, + "learning_rate": 0.00025679012345679016, + "loss": 5.4222, + "step": 1246 + }, + { + "epoch": 2.770341571785615, + "grad_norm": 0.05429822951555252, + "learning_rate": 0.000254320987654321, + "loss": 5.7432, + "step": 1247 + }, + { + "epoch": 2.7725631768953067, + "grad_norm": 0.07386202365159988, + "learning_rate": 0.00025185185185185185, + "loss": 5.8407, + "step": 1248 + }, + { + "epoch": 2.7747847820049985, + "grad_norm": 0.07264435291290283, + "learning_rate": 0.0002493827160493827, + "loss": 5.9617, + "step": 1249 + }, + { + "epoch": 2.7770063871146906, + "grad_norm": 0.05628679692745209, + "learning_rate": 0.0002469135802469136, + "loss": 6.1518, + "step": 1250 + }, + { + "epoch": 2.7792279922243823, + "grad_norm": 0.0481853187084198, + "learning_rate": 0.0002444444444444445, + "loss": 5.9351, + "step": 1251 + }, + { + "epoch": 2.781449597334074, + "grad_norm": 0.04323854297399521, + "learning_rate": 0.0002419753086419753, + "loss": 6.051, + "step": 1252 + }, + { + "epoch": 2.7836712024437658, + "grad_norm": 0.06301230937242508, + "learning_rate": 0.0002395061728395062, + "loss": 5.686, + "step": 1253 + }, + { + "epoch": 2.7858928075534575, + "grad_norm": 0.05843862518668175, + 
"learning_rate": 0.00023703703703703704, + "loss": 5.8487, + "step": 1254 + }, + { + "epoch": 2.788114412663149, + "grad_norm": 0.05112096294760704, + "learning_rate": 0.0002345679012345679, + "loss": 5.7456, + "step": 1255 + }, + { + "epoch": 2.790336017772841, + "grad_norm": 0.06781143695116043, + "learning_rate": 0.00023209876543209875, + "loss": 5.8486, + "step": 1256 + }, + { + "epoch": 2.7925576228825326, + "grad_norm": 0.04605870693922043, + "learning_rate": 0.00022962962962962965, + "loss": 5.8111, + "step": 1257 + }, + { + "epoch": 2.7947792279922243, + "grad_norm": 0.058881402015686035, + "learning_rate": 0.0002271604938271605, + "loss": 6.2209, + "step": 1258 + }, + { + "epoch": 2.797000833101916, + "grad_norm": 0.0564490482211113, + "learning_rate": 0.00022469135802469136, + "loss": 5.887, + "step": 1259 + }, + { + "epoch": 2.7992224382116078, + "grad_norm": 0.06960400193929672, + "learning_rate": 0.0002222222222222222, + "loss": 6.148, + "step": 1260 + }, + { + "epoch": 2.8014440433212995, + "grad_norm": 0.05847383290529251, + "learning_rate": 0.0002197530864197531, + "loss": 5.9894, + "step": 1261 + }, + { + "epoch": 2.803665648430991, + "grad_norm": 0.0737534686923027, + "learning_rate": 0.00021728395061728395, + "loss": 6.0911, + "step": 1262 + }, + { + "epoch": 2.805887253540683, + "grad_norm": 0.07557881623506546, + "learning_rate": 0.00021481481481481482, + "loss": 5.7048, + "step": 1263 + }, + { + "epoch": 2.8081088586503746, + "grad_norm": 0.06748920679092407, + "learning_rate": 0.00021234567901234566, + "loss": 6.051, + "step": 1264 + }, + { + "epoch": 2.810330463760067, + "grad_norm": 0.05918078124523163, + "learning_rate": 0.00020987654320987656, + "loss": 5.9721, + "step": 1265 + }, + { + "epoch": 2.8125520688697585, + "grad_norm": 0.06494231522083282, + "learning_rate": 0.0002074074074074074, + "loss": 5.8291, + "step": 1266 + }, + { + "epoch": 2.81477367397945, + "grad_norm": 0.05254039913415909, + "learning_rate": 0.00020493827160493827, + "loss": 5.9545, + "step": 1267 + }, + { + "epoch": 2.816995279089142, + "grad_norm": 0.05591006949543953, + "learning_rate": 0.00020246913580246917, + "loss": 5.7866, + "step": 1268 + }, + { + "epoch": 2.8192168841988337, + "grad_norm": 0.05167578533291817, + "learning_rate": 0.0002, + "loss": 5.8748, + "step": 1269 + }, + { + "epoch": 2.8214384893085254, + "grad_norm": 0.08402105420827866, + "learning_rate": 0.00019753086419753088, + "loss": 5.6071, + "step": 1270 + }, + { + "epoch": 2.823660094418217, + "grad_norm": 0.05030294507741928, + "learning_rate": 0.00019506172839506172, + "loss": 6.0468, + "step": 1271 + }, + { + "epoch": 2.825881699527909, + "grad_norm": 0.05387067794799805, + "learning_rate": 0.00019259259259259262, + "loss": 5.9581, + "step": 1272 + }, + { + "epoch": 2.8281033046376005, + "grad_norm": 0.04881101846694946, + "learning_rate": 0.00019012345679012346, + "loss": 5.703, + "step": 1273 + }, + { + "epoch": 2.8303249097472927, + "grad_norm": 0.05300934612751007, + "learning_rate": 0.00018765432098765433, + "loss": 5.7739, + "step": 1274 + }, + { + "epoch": 2.8325465148569844, + "grad_norm": 0.050771087408065796, + "learning_rate": 0.00018518518518518518, + "loss": 5.9551, + "step": 1275 + }, + { + "epoch": 2.834768119966676, + "grad_norm": 0.052648287266492844, + "learning_rate": 0.00018271604938271607, + "loss": 5.8995, + "step": 1276 + }, + { + "epoch": 2.836989725076368, + "grad_norm": 0.05061740055680275, + "learning_rate": 0.00018024691358024692, + "loss": 5.7913, + "step": 1277 + }, + { + "epoch": 
2.8392113301860595, + "grad_norm": 0.04365115985274315, + "learning_rate": 0.00017777777777777779, + "loss": 5.823, + "step": 1278 + }, + { + "epoch": 2.8414329352957512, + "grad_norm": 0.06664296984672546, + "learning_rate": 0.00017530864197530866, + "loss": 6.2006, + "step": 1279 + }, + { + "epoch": 2.843654540405443, + "grad_norm": 0.055986661463975906, + "learning_rate": 0.00017283950617283953, + "loss": 5.8978, + "step": 1280 + }, + { + "epoch": 2.8458761455151347, + "grad_norm": 0.04974586144089699, + "learning_rate": 0.00017037037037037037, + "loss": 6.0127, + "step": 1281 + }, + { + "epoch": 2.8480977506248264, + "grad_norm": 0.06275002658367157, + "learning_rate": 0.00016790123456790124, + "loss": 6.3379, + "step": 1282 + }, + { + "epoch": 2.850319355734518, + "grad_norm": 0.05482464283704758, + "learning_rate": 0.0001654320987654321, + "loss": 5.7202, + "step": 1283 + }, + { + "epoch": 2.85254096084421, + "grad_norm": 0.056165117770433426, + "learning_rate": 0.00016296296296296295, + "loss": 5.7521, + "step": 1284 + }, + { + "epoch": 2.8547625659539015, + "grad_norm": 0.05828070640563965, + "learning_rate": 0.00016049382716049382, + "loss": 6.176, + "step": 1285 + }, + { + "epoch": 2.8569841710635933, + "grad_norm": 0.0738142803311348, + "learning_rate": 0.0001580246913580247, + "loss": 6.1232, + "step": 1286 + }, + { + "epoch": 2.859205776173285, + "grad_norm": 0.051991552114486694, + "learning_rate": 0.00015555555555555556, + "loss": 5.7182, + "step": 1287 + }, + { + "epoch": 2.8614273812829767, + "grad_norm": 0.06892450898885727, + "learning_rate": 0.0001530864197530864, + "loss": 6.0863, + "step": 1288 + }, + { + "epoch": 2.863648986392669, + "grad_norm": 0.04431318864226341, + "learning_rate": 0.00015061728395061728, + "loss": 6.0564, + "step": 1289 + }, + { + "epoch": 2.8658705915023606, + "grad_norm": 0.07410877197980881, + "learning_rate": 0.00014814814814814815, + "loss": 6.0874, + "step": 1290 + }, + { + "epoch": 2.8680921966120523, + "grad_norm": 0.05466211214661598, + "learning_rate": 0.00014567901234567902, + "loss": 5.9654, + "step": 1291 + }, + { + "epoch": 2.870313801721744, + "grad_norm": 0.045556023716926575, + "learning_rate": 0.00014320987654320989, + "loss": 5.8639, + "step": 1292 + }, + { + "epoch": 2.8725354068314357, + "grad_norm": 0.04874875396490097, + "learning_rate": 0.00014074074074074076, + "loss": 5.9451, + "step": 1293 + }, + { + "epoch": 2.8747570119411274, + "grad_norm": 0.05590863898396492, + "learning_rate": 0.00013827160493827163, + "loss": 6.0744, + "step": 1294 + }, + { + "epoch": 2.876978617050819, + "grad_norm": 0.05163506045937538, + "learning_rate": 0.00013580246913580247, + "loss": 5.6175, + "step": 1295 + }, + { + "epoch": 2.879200222160511, + "grad_norm": 0.058315426111221313, + "learning_rate": 0.00013333333333333334, + "loss": 5.7961, + "step": 1296 + }, + { + "epoch": 2.8814218272702026, + "grad_norm": 0.0603594146668911, + "learning_rate": 0.0001308641975308642, + "loss": 5.8225, + "step": 1297 + }, + { + "epoch": 2.8836434323798947, + "grad_norm": 0.06714392453432083, + "learning_rate": 0.00012839506172839508, + "loss": 5.9419, + "step": 1298 + }, + { + "epoch": 2.8858650374895864, + "grad_norm": 0.06128297373652458, + "learning_rate": 0.00012592592592592592, + "loss": 5.6193, + "step": 1299 + }, + { + "epoch": 2.888086642599278, + "grad_norm": 0.06052326411008835, + "learning_rate": 0.0001234567901234568, + "loss": 5.4785, + "step": 1300 + }, + { + "epoch": 2.89030824770897, + "grad_norm": 0.0529288649559021, + "learning_rate": 
0.00012098765432098765, + "loss": 6.2424, + "step": 1301 + }, + { + "epoch": 2.8925298528186616, + "grad_norm": 0.05307260900735855, + "learning_rate": 0.00011851851851851852, + "loss": 5.9248, + "step": 1302 + }, + { + "epoch": 2.8947514579283533, + "grad_norm": 0.04504923149943352, + "learning_rate": 0.00011604938271604938, + "loss": 5.6298, + "step": 1303 + }, + { + "epoch": 2.896973063038045, + "grad_norm": 0.06184275075793266, + "learning_rate": 0.00011358024691358025, + "loss": 6.0494, + "step": 1304 + }, + { + "epoch": 2.8991946681477367, + "grad_norm": 0.06522642076015472, + "learning_rate": 0.0001111111111111111, + "loss": 5.9472, + "step": 1305 + }, + { + "epoch": 2.9014162732574285, + "grad_norm": 0.045491501688957214, + "learning_rate": 0.00010864197530864197, + "loss": 5.7783, + "step": 1306 + }, + { + "epoch": 2.90363787836712, + "grad_norm": 0.05264101177453995, + "learning_rate": 0.00010617283950617283, + "loss": 5.7188, + "step": 1307 + }, + { + "epoch": 2.905859483476812, + "grad_norm": 0.08518176525831223, + "learning_rate": 0.0001037037037037037, + "loss": 6.0351, + "step": 1308 + }, + { + "epoch": 2.9080810885865036, + "grad_norm": 0.06091070547699928, + "learning_rate": 0.00010123456790123458, + "loss": 5.5296, + "step": 1309 + }, + { + "epoch": 2.9103026936961953, + "grad_norm": 0.06074732169508934, + "learning_rate": 9.876543209876544e-05, + "loss": 5.6439, + "step": 1310 + }, + { + "epoch": 2.912524298805887, + "grad_norm": 0.04879772290587425, + "learning_rate": 9.629629629629631e-05, + "loss": 5.7684, + "step": 1311 + }, + { + "epoch": 2.9147459039155788, + "grad_norm": 0.09098317474126816, + "learning_rate": 9.382716049382717e-05, + "loss": 6.2529, + "step": 1312 + }, + { + "epoch": 2.916967509025271, + "grad_norm": 0.06702974438667297, + "learning_rate": 9.135802469135804e-05, + "loss": 5.9017, + "step": 1313 + }, + { + "epoch": 2.9191891141349626, + "grad_norm": 0.05466683581471443, + "learning_rate": 8.888888888888889e-05, + "loss": 5.8515, + "step": 1314 + }, + { + "epoch": 2.9214107192446543, + "grad_norm": 0.10034381598234177, + "learning_rate": 8.641975308641976e-05, + "loss": 5.572, + "step": 1315 + }, + { + "epoch": 2.923632324354346, + "grad_norm": 0.049551207572221756, + "learning_rate": 8.395061728395062e-05, + "loss": 5.622, + "step": 1316 + }, + { + "epoch": 2.9258539294640378, + "grad_norm": 0.06635282188653946, + "learning_rate": 8.148148148148148e-05, + "loss": 6.2147, + "step": 1317 + }, + { + "epoch": 2.9280755345737295, + "grad_norm": 0.0814097449183464, + "learning_rate": 7.901234567901235e-05, + "loss": 5.8872, + "step": 1318 + }, + { + "epoch": 2.930297139683421, + "grad_norm": 0.06352907419204712, + "learning_rate": 7.65432098765432e-05, + "loss": 5.875, + "step": 1319 + }, + { + "epoch": 2.932518744793113, + "grad_norm": 0.0544845350086689, + "learning_rate": 7.407407407407407e-05, + "loss": 5.9321, + "step": 1320 + }, + { + "epoch": 2.9347403499028046, + "grad_norm": 0.04383888095617294, + "learning_rate": 7.160493827160494e-05, + "loss": 6.0925, + "step": 1321 + }, + { + "epoch": 2.936961955012497, + "grad_norm": 0.06635189801454544, + "learning_rate": 6.913580246913581e-05, + "loss": 6.0666, + "step": 1322 + }, + { + "epoch": 2.9391835601221885, + "grad_norm": 0.06565090268850327, + "learning_rate": 6.666666666666667e-05, + "loss": 6.0449, + "step": 1323 + }, + { + "epoch": 2.9414051652318802, + "grad_norm": 0.04763912782073021, + "learning_rate": 6.419753086419754e-05, + "loss": 6.076, + "step": 1324 + }, + { + "epoch": 
2.943626770341572, + "grad_norm": 0.07313438504934311, + "learning_rate": 6.17283950617284e-05, + "loss": 5.9441, + "step": 1325 + }, + { + "epoch": 2.9458483754512637, + "grad_norm": 0.07543905079364777, + "learning_rate": 5.925925925925926e-05, + "loss": 6.0067, + "step": 1326 + }, + { + "epoch": 2.9480699805609554, + "grad_norm": 0.057329267263412476, + "learning_rate": 5.679012345679012e-05, + "loss": 5.655, + "step": 1327 + }, + { + "epoch": 2.950291585670647, + "grad_norm": 0.04951634630560875, + "learning_rate": 5.4320987654320986e-05, + "loss": 5.9234, + "step": 1328 + }, + { + "epoch": 2.952513190780339, + "grad_norm": 0.06038312241435051, + "learning_rate": 5.185185185185185e-05, + "loss": 5.9723, + "step": 1329 + }, + { + "epoch": 2.9547347958900305, + "grad_norm": 0.05027232691645622, + "learning_rate": 4.938271604938272e-05, + "loss": 5.6912, + "step": 1330 + }, + { + "epoch": 2.9569564009997222, + "grad_norm": 0.056142378598451614, + "learning_rate": 4.691358024691358e-05, + "loss": 5.8446, + "step": 1331 + }, + { + "epoch": 2.959178006109414, + "grad_norm": 0.06986388564109802, + "learning_rate": 4.4444444444444447e-05, + "loss": 5.8146, + "step": 1332 + }, + { + "epoch": 2.9613996112191057, + "grad_norm": 0.059677302837371826, + "learning_rate": 4.197530864197531e-05, + "loss": 6.0107, + "step": 1333 + }, + { + "epoch": 2.9636212163287974, + "grad_norm": 0.06304765492677689, + "learning_rate": 3.950617283950617e-05, + "loss": 6.0351, + "step": 1334 + }, + { + "epoch": 2.965842821438489, + "grad_norm": 0.05687398463487625, + "learning_rate": 3.7037037037037037e-05, + "loss": 5.9451, + "step": 1335 + }, + { + "epoch": 2.968064426548181, + "grad_norm": 0.04591461271047592, + "learning_rate": 3.456790123456791e-05, + "loss": 6.1389, + "step": 1336 + }, + { + "epoch": 2.970286031657873, + "grad_norm": 0.06220898777246475, + "learning_rate": 3.209876543209877e-05, + "loss": 5.6631, + "step": 1337 + }, + { + "epoch": 2.9725076367675647, + "grad_norm": 0.04418789967894554, + "learning_rate": 2.962962962962963e-05, + "loss": 5.8768, + "step": 1338 + }, + { + "epoch": 2.9747292418772564, + "grad_norm": 0.05900184065103531, + "learning_rate": 2.7160493827160493e-05, + "loss": 5.945, + "step": 1339 + }, + { + "epoch": 2.976950846986948, + "grad_norm": 0.05201757326722145, + "learning_rate": 2.469135802469136e-05, + "loss": 5.9387, + "step": 1340 + }, + { + "epoch": 2.97917245209664, + "grad_norm": 0.05885840207338333, + "learning_rate": 2.2222222222222223e-05, + "loss": 5.904, + "step": 1341 + }, + { + "epoch": 2.9813940572063315, + "grad_norm": 0.05918167158961296, + "learning_rate": 1.9753086419753087e-05, + "loss": 5.8086, + "step": 1342 + }, + { + "epoch": 2.9836156623160233, + "grad_norm": 0.06754791736602783, + "learning_rate": 1.7283950617283953e-05, + "loss": 5.6532, + "step": 1343 + }, + { + "epoch": 2.985837267425715, + "grad_norm": 0.06648389250040054, + "learning_rate": 1.4814814814814815e-05, + "loss": 5.9772, + "step": 1344 + }, + { + "epoch": 2.9880588725354067, + "grad_norm": 0.05073931813240051, + "learning_rate": 1.234567901234568e-05, + "loss": 6.1126, + "step": 1345 + }, + { + "epoch": 2.990280477645099, + "grad_norm": 0.051233015954494476, + "learning_rate": 9.876543209876543e-06, + "loss": 5.8266, + "step": 1346 + }, + { + "epoch": 2.9925020827547906, + "grad_norm": 0.06040095537900925, + "learning_rate": 7.4074074074074075e-06, + "loss": 5.7803, + "step": 1347 + }, + { + "epoch": 2.9947236878644823, + "grad_norm": 0.04836644232273102, + "learning_rate": 
4.938271604938272e-06, + "loss": 5.9451, + "step": 1348 + }, + { + "epoch": 2.996945292974174, + "grad_norm": 0.06376051902770996, + "learning_rate": 2.469135802469136e-06, + "loss": 5.7011, + "step": 1349 + }, + { + "epoch": 2.9991668980838657, + "grad_norm": 0.052688080817461014, + "learning_rate": 0.0, + "loss": 5.8671, + "step": 1350 + }, + { + "epoch": 2.9991668980838657, + "step": 1350, + "total_flos": 2.6133172282667827e+17, + "train_loss": 6.73684572007921, + "train_runtime": 2224.0851, + "train_samples_per_second": 9.715, + "train_steps_per_second": 0.607 + } + ], + "logging_steps": 1.0, + "max_steps": 1350, + "num_input_tokens_seen": 0, + "num_train_epochs": 3, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": true + }, + "attributes": {} + } + }, + "total_flos": 2.6133172282667827e+17, + "train_batch_size": 1, + "trial_name": null, + "trial_params": null +}