{
  "best_global_step": 4000,
  "best_metric": 1.430210828781128,
  "best_model_checkpoint": "output/Tiger-generation/2025-07-18-18.00.46/checkpoint-4000",
  "epoch": 0.8167432363450741,
  "eval_steps": 1000,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002041858090862685,
      "grad_norm": 40.58543014526367,
      "learning_rate": 1.2244897959183673e-07,
      "loss": 2.4785,
      "mean_token_accuracy": 0.525111411511898,
      "num_tokens": 69505.0,
      "step": 10
    },
    {
      "epoch": 0.00408371618172537,
      "grad_norm": 42.50286102294922,
      "learning_rate": 2.5850340136054423e-07,
      "loss": 2.4665,
      "mean_token_accuracy": 0.5225918605923653,
      "num_tokens": 139531.0,
      "step": 20
    },
    {
      "epoch": 0.006125574272588055,
      "grad_norm": 32.75865936279297,
      "learning_rate": 3.945578231292517e-07,
      "loss": 2.4605,
      "mean_token_accuracy": 0.5167869120836258,
      "num_tokens": 210870.0,
      "step": 30
    },
    {
      "epoch": 0.00816743236345074,
      "grad_norm": 37.280799865722656,
      "learning_rate": 5.306122448979592e-07,
      "loss": 2.481,
      "mean_token_accuracy": 0.5240069389343261,
      "num_tokens": 281885.0,
      "step": 40
    },
    {
      "epoch": 0.010209290454313425,
      "grad_norm": 35.95231628417969,
      "learning_rate": 6.666666666666667e-07,
      "loss": 2.4516,
      "mean_token_accuracy": 0.5276862233877182,
      "num_tokens": 351322.0,
      "step": 50
    },
    {
      "epoch": 0.01225114854517611,
      "grad_norm": 35.390323638916016,
      "learning_rate": 8.027210884353742e-07,
      "loss": 2.4629,
      "mean_token_accuracy": 0.5194140955805778,
      "num_tokens": 423235.0,
      "step": 60
    },
    {
      "epoch": 0.014293006636038795,
      "grad_norm": 32.571537017822266,
      "learning_rate": 9.387755102040817e-07,
      "loss": 2.326,
      "mean_token_accuracy": 0.5447167232632637,
      "num_tokens": 494426.0,
      "step": 70
    },
    {
      "epoch": 0.01633486472690148,
      "grad_norm": 51.0481071472168,
      "learning_rate": 1.0748299319727893e-06,
      "loss": 2.3607,
      "mean_token_accuracy": 0.5329553991556167,
      "num_tokens": 565410.0,
      "step": 80
    },
    {
      "epoch": 0.018376722817764167,
      "grad_norm": 40.323486328125,
      "learning_rate": 1.2108843537414967e-06,
      "loss": 2.32,
      "mean_token_accuracy": 0.5369406580924988,
      "num_tokens": 637547.0,
      "step": 90
    },
    {
      "epoch": 0.02041858090862685,
      "grad_norm": 41.64793014526367,
      "learning_rate": 1.3469387755102043e-06,
      "loss": 2.2872,
      "mean_token_accuracy": 0.5405925557017326,
      "num_tokens": 709374.0,
      "step": 100
    },
    {
      "epoch": 0.022460438999489536,
      "grad_norm": 48.55975341796875,
      "learning_rate": 1.4829931972789117e-06,
      "loss": 2.2847,
      "mean_token_accuracy": 0.5352288991212845,
      "num_tokens": 779218.0,
      "step": 110
    },
    {
      "epoch": 0.02450229709035222,
      "grad_norm": 38.39033889770508,
      "learning_rate": 1.6190476190476193e-06,
      "loss": 2.2336,
      "mean_token_accuracy": 0.5404456198215485,
      "num_tokens": 846940.0,
      "step": 120
    },
    {
      "epoch": 0.026544155181214904,
      "grad_norm": 23.284969329833984,
      "learning_rate": 1.7551020408163267e-06,
      "loss": 2.1848,
      "mean_token_accuracy": 0.5559335589408875,
      "num_tokens": 921181.0,
      "step": 130
    },
    {
      "epoch": 0.02858601327207759,
      "grad_norm": 31.96380615234375,
      "learning_rate": 1.8911564625850343e-06,
      "loss": 2.168,
      "mean_token_accuracy": 0.5423629283905029,
      "num_tokens": 990533.0,
      "step": 140
    },
    {
      "epoch": 0.030627871362940276,
      "grad_norm": 38.387664794921875,
      "learning_rate": 2.0272108843537416e-06,
      "loss": 2.0985,
      "mean_token_accuracy": 0.5486705094575882,
      "num_tokens": 1060250.0,
      "step": 150
    },
    {
      "epoch": 0.03266972945380296,
      "grad_norm": 30.54590606689453,
      "learning_rate": 2.1632653061224495e-06,
      "loss": 2.1418,
      "mean_token_accuracy": 0.5525166988372803,
      "num_tokens": 1132379.0,
      "step": 160
    },
    {
      "epoch": 0.03471158754466565,
      "grad_norm": 32.12227249145508,
      "learning_rate": 2.299319727891157e-06,
      "loss": 2.1179,
      "mean_token_accuracy": 0.5542925208806991,
      "num_tokens": 1208198.0,
      "step": 170
    },
    {
      "epoch": 0.036753445635528334,
      "grad_norm": 38.035926818847656,
      "learning_rate": 2.4353741496598642e-06,
      "loss": 2.0651,
      "mean_token_accuracy": 0.5671319425106048,
      "num_tokens": 1279054.0,
      "step": 180
    },
    {
      "epoch": 0.03879530372639101,
      "grad_norm": 25.86829376220703,
      "learning_rate": 2.571428571428571e-06,
      "loss": 2.1026,
      "mean_token_accuracy": 0.5436463728547096,
      "num_tokens": 1347862.0,
      "step": 190
    },
    {
      "epoch": 0.0408371618172537,
      "grad_norm": 40.36919021606445,
      "learning_rate": 2.707482993197279e-06,
      "loss": 1.98,
      "mean_token_accuracy": 0.5548223063349724,
      "num_tokens": 1417594.0,
      "step": 200
    },
    {
      "epoch": 0.042879019908116385,
      "grad_norm": 28.774658203125,
      "learning_rate": 2.843537414965987e-06,
      "loss": 1.965,
      "mean_token_accuracy": 0.559009425342083,
      "num_tokens": 1489549.0,
      "step": 210
    },
    {
      "epoch": 0.04492087799897907,
      "grad_norm": 21.973716735839844,
      "learning_rate": 2.979591836734694e-06,
      "loss": 1.9638,
      "mean_token_accuracy": 0.5593225166201592,
      "num_tokens": 1555401.0,
      "step": 220
    },
    {
      "epoch": 0.04696273608984176,
      "grad_norm": 36.374786376953125,
      "learning_rate": 3.1156462585034016e-06,
      "loss": 1.9379,
      "mean_token_accuracy": 0.5614111497998238,
      "num_tokens": 1627148.0,
      "step": 230
    },
    {
      "epoch": 0.04900459418070444,
      "grad_norm": 30.232772827148438,
      "learning_rate": 3.251700680272109e-06,
      "loss": 1.9311,
      "mean_token_accuracy": 0.5598069429397583,
      "num_tokens": 1696743.0,
      "step": 240
    },
    {
      "epoch": 0.05104645227156713,
      "grad_norm": 33.978477478027344,
      "learning_rate": 3.3877551020408168e-06,
      "loss": 1.8232,
      "mean_token_accuracy": 0.5703684508800506,
      "num_tokens": 1769142.0,
      "step": 250
    },
    {
      "epoch": 0.05308831036242981,
      "grad_norm": 22.255043029785156,
      "learning_rate": 3.523809523809524e-06,
      "loss": 1.6812,
      "mean_token_accuracy": 0.5865129962563514,
      "num_tokens": 1843700.0,
      "step": 260
    },
    {
      "epoch": 0.055130168453292494,
      "grad_norm": 24.115663528442383,
      "learning_rate": 3.6598639455782316e-06,
      "loss": 1.7255,
      "mean_token_accuracy": 0.5739775270223617,
      "num_tokens": 1916733.0,
      "step": 270
    },
    {
      "epoch": 0.05717202654415518,
      "grad_norm": 26.15631866455078,
      "learning_rate": 3.795918367346939e-06,
      "loss": 1.7444,
      "mean_token_accuracy": 0.5637126505374909,
      "num_tokens": 1987487.0,
      "step": 280
    },
    {
      "epoch": 0.059213884635017866,
      "grad_norm": 23.212749481201172,
      "learning_rate": 3.931972789115646e-06,
      "loss": 1.7016,
      "mean_token_accuracy": 0.5802351862192154,
      "num_tokens": 2061962.0,
      "step": 290
    },
    {
      "epoch": 0.06125574272588055,
      "grad_norm": 24.3252010345459,
      "learning_rate": 4.068027210884354e-06,
      "loss": 1.7064,
      "mean_token_accuracy": 0.5761498302221298,
      "num_tokens": 2132628.0,
      "step": 300
    },
    {
      "epoch": 0.06329760081674324,
      "grad_norm": 20.47394561767578,
      "learning_rate": 4.204081632653061e-06,
      "loss": 1.6628,
      "mean_token_accuracy": 0.5744958698749543,
      "num_tokens": 2202325.0,
      "step": 310
    },
    {
      "epoch": 0.06533945890760592,
      "grad_norm": 19.049283981323242,
      "learning_rate": 4.340136054421769e-06,
      "loss": 1.61,
      "mean_token_accuracy": 0.598686158657074,
      "num_tokens": 2272383.0,
      "step": 320
    },
    {
      "epoch": 0.06738131699846861,
      "grad_norm": 14.891511917114258,
      "learning_rate": 4.476190476190477e-06,
      "loss": 1.6495,
      "mean_token_accuracy": 0.5836592674255371,
      "num_tokens": 2343570.0,
      "step": 330
    },
    {
      "epoch": 0.0694231750893313,
      "grad_norm": 23.719728469848633,
      "learning_rate": 4.612244897959184e-06,
      "loss": 1.5801,
      "mean_token_accuracy": 0.597952377796173,
      "num_tokens": 2412952.0,
      "step": 340
    },
    {
      "epoch": 0.07146503318019398,
      "grad_norm": 24.924657821655273,
      "learning_rate": 4.7482993197278915e-06,
      "loss": 1.5314,
      "mean_token_accuracy": 0.5992678642272949,
      "num_tokens": 2486087.0,
      "step": 350
    },
    {
      "epoch": 0.07350689127105667,
      "grad_norm": 19.125892639160156,
      "learning_rate": 4.884353741496599e-06,
      "loss": 1.5699,
      "mean_token_accuracy": 0.5989518314599991,
      "num_tokens": 2560105.0,
      "step": 360
    },
    {
      "epoch": 0.07554874936191934,
      "grad_norm": 24.626413345336914,
      "learning_rate": 5.020408163265307e-06,
      "loss": 1.5849,
      "mean_token_accuracy": 0.5961738631129265,
      "num_tokens": 2635351.0,
      "step": 370
    },
    {
      "epoch": 0.07759060745278203,
      "grad_norm": 16.234474182128906,
      "learning_rate": 5.156462585034014e-06,
      "loss": 1.6359,
      "mean_token_accuracy": 0.59544438123703,
      "num_tokens": 2708644.0,
      "step": 380
    },
    {
      "epoch": 0.07963246554364471,
      "grad_norm": 15.393643379211426,
      "learning_rate": 5.292517006802722e-06,
      "loss": 1.6003,
      "mean_token_accuracy": 0.597255888581276,
      "num_tokens": 2782814.0,
      "step": 390
    },
    {
      "epoch": 0.0816743236345074,
      "grad_norm": 18.73783302307129,
      "learning_rate": 5.428571428571429e-06,
      "loss": 1.6231,
      "mean_token_accuracy": 0.5859753489494324,
      "num_tokens": 2850906.0,
      "step": 400
    },
    {
      "epoch": 0.08371618172537008,
      "grad_norm": 14.788663864135742,
      "learning_rate": 5.564625850340136e-06,
      "loss": 1.5492,
      "mean_token_accuracy": 0.6039781719446182,
      "num_tokens": 2923967.0,
      "step": 410
    },
    {
      "epoch": 0.08575803981623277,
      "grad_norm": 16.760385513305664,
      "learning_rate": 5.700680272108844e-06,
      "loss": 1.5715,
      "mean_token_accuracy": 0.6139408111572265,
      "num_tokens": 2992994.0,
      "step": 420
    },
    {
      "epoch": 0.08779989790709546,
      "grad_norm": 23.059823989868164,
      "learning_rate": 5.8367346938775515e-06,
      "loss": 1.5412,
      "mean_token_accuracy": 0.601115295290947,
      "num_tokens": 3063958.0,
      "step": 430
    },
    {
      "epoch": 0.08984175599795814,
      "grad_norm": 21.973384857177734,
      "learning_rate": 5.972789115646259e-06,
      "loss": 1.5855,
      "mean_token_accuracy": 0.6108441263437271,
      "num_tokens": 3132005.0,
      "step": 440
    },
    {
      "epoch": 0.09188361408882083,
      "grad_norm": 17.117069244384766,
      "learning_rate": 6.108843537414967e-06,
      "loss": 1.576,
      "mean_token_accuracy": 0.5952845871448517,
      "num_tokens": 3204170.0,
      "step": 450
    },
    {
      "epoch": 0.09392547217968351,
      "grad_norm": 18.47603988647461,
      "learning_rate": 6.244897959183675e-06,
      "loss": 1.5333,
      "mean_token_accuracy": 0.6100009500980377,
      "num_tokens": 3275122.0,
      "step": 460
    },
    {
      "epoch": 0.0959673302705462,
      "grad_norm": 20.058645248413086,
      "learning_rate": 6.380952380952381e-06,
      "loss": 1.4875,
      "mean_token_accuracy": 0.6178710669279098,
      "num_tokens": 3348601.0,
      "step": 470
    },
    {
      "epoch": 0.09800918836140889,
      "grad_norm": 17.164993286132812,
      "learning_rate": 6.517006802721089e-06,
      "loss": 1.5212,
      "mean_token_accuracy": 0.6061257958412171,
      "num_tokens": 3420761.0,
      "step": 480
    },
    {
      "epoch": 0.10005104645227157,
      "grad_norm": 15.247114181518555,
      "learning_rate": 6.653061224489797e-06,
      "loss": 1.5402,
      "mean_token_accuracy": 0.6126458764076232,
      "num_tokens": 3490967.0,
      "step": 490
    },
    {
      "epoch": 0.10209290454313426,
      "grad_norm": 18.676435470581055,
      "learning_rate": 6.789115646258504e-06,
      "loss": 1.4995,
      "mean_token_accuracy": 0.6140502035617829,
      "num_tokens": 3564295.0,
      "step": 500
    },
    {
      "epoch": 0.10413476263399694,
      "grad_norm": 19.05228042602539,
      "learning_rate": 6.925170068027211e-06,
      "loss": 1.5967,
      "mean_token_accuracy": 0.5993693619966507,
      "num_tokens": 3633837.0,
      "step": 510
    },
    {
      "epoch": 0.10617662072485962,
      "grad_norm": 16.225095748901367,
      "learning_rate": 7.061224489795919e-06,
      "loss": 1.4627,
      "mean_token_accuracy": 0.6195647984743118,
      "num_tokens": 3707429.0,
      "step": 520
    },
    {
      "epoch": 0.1082184788157223,
      "grad_norm": 17.119951248168945,
      "learning_rate": 7.197278911564627e-06,
      "loss": 1.4916,
      "mean_token_accuracy": 0.6147974878549576,
      "num_tokens": 3782215.0,
      "step": 530
    },
    {
      "epoch": 0.11026033690658499,
      "grad_norm": 14.728631973266602,
      "learning_rate": 7.333333333333333e-06,
      "loss": 1.5828,
      "mean_token_accuracy": 0.6027207791805267,
      "num_tokens": 3856106.0,
      "step": 540
    },
    {
      "epoch": 0.11230219499744767,
      "grad_norm": 13.182671546936035,
      "learning_rate": 7.469387755102041e-06,
      "loss": 1.5749,
      "mean_token_accuracy": 0.6024670600891113,
      "num_tokens": 3925174.0,
      "step": 550
    },
    {
      "epoch": 0.11434405308831036,
      "grad_norm": 17.364179611206055,
      "learning_rate": 7.605442176870749e-06,
      "loss": 1.5689,
      "mean_token_accuracy": 0.606898409128189,
      "num_tokens": 3993338.0,
      "step": 560
    },
    {
      "epoch": 0.11638591117917305,
      "grad_norm": 16.174150466918945,
      "learning_rate": 7.741496598639456e-06,
      "loss": 1.5548,
      "mean_token_accuracy": 0.6065890669822693,
      "num_tokens": 4063428.0,
      "step": 570
    },
    {
      "epoch": 0.11842776927003573,
      "grad_norm": 17.375446319580078,
      "learning_rate": 7.877551020408164e-06,
      "loss": 1.4776,
      "mean_token_accuracy": 0.614559343457222,
      "num_tokens": 4135382.0,
      "step": 580
    },
    {
      "epoch": 0.12046962736089842,
      "grad_norm": 17.446441650390625,
      "learning_rate": 8.013605442176871e-06,
      "loss": 1.5583,
      "mean_token_accuracy": 0.6063664674758911,
      "num_tokens": 4211679.0,
      "step": 590
    },
    {
      "epoch": 0.1225114854517611,
      "grad_norm": 17.279420852661133,
      "learning_rate": 8.14965986394558e-06,
      "loss": 1.4835,
      "mean_token_accuracy": 0.6287278205156326,
      "num_tokens": 4277878.0,
      "step": 600
    },
    {
      "epoch": 0.12455334354262379,
      "grad_norm": 16.633319854736328,
      "learning_rate": 8.285714285714287e-06,
      "loss": 1.5295,
      "mean_token_accuracy": 0.6134639114141465,
      "num_tokens": 4348459.0,
      "step": 610
    },
    {
      "epoch": 0.12659520163348648,
      "grad_norm": 15.54926872253418,
      "learning_rate": 8.421768707482994e-06,
      "loss": 1.5368,
      "mean_token_accuracy": 0.612190106511116,
      "num_tokens": 4423381.0,
      "step": 620
    },
    {
      "epoch": 0.12863705972434916,
      "grad_norm": 15.600358963012695,
      "learning_rate": 8.557823129251701e-06,
      "loss": 1.4858,
      "mean_token_accuracy": 0.6275644600391388,
      "num_tokens": 4493430.0,
      "step": 630
    },
    {
      "epoch": 0.13067891781521185,
      "grad_norm": 15.166326522827148,
      "learning_rate": 8.69387755102041e-06,
      "loss": 1.5038,
      "mean_token_accuracy": 0.6171506643295288,
      "num_tokens": 4562553.0,
      "step": 640
    },
    {
      "epoch": 0.13272077590607453,
      "grad_norm": 19.53791618347168,
      "learning_rate": 8.829931972789117e-06,
      "loss": 1.578,
      "mean_token_accuracy": 0.6041344046592713,
      "num_tokens": 4627736.0,
      "step": 650
    },
    {
      "epoch": 0.13476263399693722,
      "grad_norm": 15.902642250061035,
      "learning_rate": 8.965986394557823e-06,
      "loss": 1.5097,
      "mean_token_accuracy": 0.6086627811193466,
      "num_tokens": 4701324.0,
      "step": 660
    },
    {
      "epoch": 0.1368044920877999,
      "grad_norm": 16.800201416015625,
      "learning_rate": 9.102040816326532e-06,
      "loss": 1.5276,
      "mean_token_accuracy": 0.6193519622087479,
      "num_tokens": 4773920.0,
      "step": 670
    },
    {
      "epoch": 0.1388463501786626,
      "grad_norm": 15.870624542236328,
      "learning_rate": 9.238095238095239e-06,
      "loss": 1.5096,
      "mean_token_accuracy": 0.6179488390684128,
      "num_tokens": 4846912.0,
      "step": 680
    },
    {
      "epoch": 0.14088820826952528,
      "grad_norm": 15.414668083190918,
      "learning_rate": 9.374149659863946e-06,
      "loss": 1.5386,
      "mean_token_accuracy": 0.6122144073247909,
      "num_tokens": 4921846.0,
      "step": 690
    },
    {
      "epoch": 0.14293006636038796,
      "grad_norm": 14.189248085021973,
      "learning_rate": 9.510204081632653e-06,
      "loss": 1.5427,
      "mean_token_accuracy": 0.5970522820949554,
      "num_tokens": 5000558.0,
      "step": 700
    },
    {
      "epoch": 0.14497192445125065,
      "grad_norm": 12.32139778137207,
      "learning_rate": 9.646258503401362e-06,
      "loss": 1.4581,
      "mean_token_accuracy": 0.6264193892478943,
      "num_tokens": 5070767.0,
      "step": 710
    },
    {
      "epoch": 0.14701378254211334,
      "grad_norm": 15.26685619354248,
      "learning_rate": 9.782312925170069e-06,
      "loss": 1.5367,
      "mean_token_accuracy": 0.6124130487442017,
      "num_tokens": 5142627.0,
      "step": 720
    },
    {
      "epoch": 0.14905564063297602,
      "grad_norm": 15.513026237487793,
      "learning_rate": 9.918367346938776e-06,
      "loss": 1.5742,
      "mean_token_accuracy": 0.6043413370847702,
      "num_tokens": 5213337.0,
      "step": 730
    },
    {
      "epoch": 0.15109749872383868,
      "grad_norm": 17.927433013916016,
      "learning_rate": 9.999999300105633e-06,
      "loss": 1.5007,
      "mean_token_accuracy": 0.6198744863271713,
      "num_tokens": 5284294.0,
      "step": 740
    },
    {
      "epoch": 0.15313935681470137,
      "grad_norm": 14.790064811706543,
      "learning_rate": 9.999991426296246e-06,
      "loss": 1.4678,
      "mean_token_accuracy": 0.6264089643955231,
      "num_tokens": 5353839.0,
      "step": 750
    },
    {
      "epoch": 0.15518121490556405,
      "grad_norm": 15.573653221130371,
      "learning_rate": 9.999974803823333e-06,
      "loss": 1.5332,
      "mean_token_accuracy": 0.614776486158371,
      "num_tokens": 5423944.0,
      "step": 760
    },
    {
      "epoch": 0.15722307299642674,
      "grad_norm": 14.701098442077637,
      "learning_rate": 9.999949432715983e-06,
      "loss": 1.4416,
      "mean_token_accuracy": 0.6279941260814667,
      "num_tokens": 5497356.0,
      "step": 770
    },
    {
      "epoch": 0.15926493108728942,
      "grad_norm": 14.240693092346191,
      "learning_rate": 9.999915313018586e-06,
      "loss": 1.5334,
      "mean_token_accuracy": 0.6209155797958374,
      "num_tokens": 5568918.0,
      "step": 780
    },
    {
      "epoch": 0.1613067891781521,
      "grad_norm": 15.73400592803955,
      "learning_rate": 9.999872444790843e-06,
      "loss": 1.4824,
      "mean_token_accuracy": 0.6152042269706726,
      "num_tokens": 5638113.0,
      "step": 790
    },
    {
      "epoch": 0.1633486472690148,
      "grad_norm": 17.401098251342773,
      "learning_rate": 9.999820828107761e-06,
      "loss": 1.478,
      "mean_token_accuracy": 0.6273152261972428,
      "num_tokens": 5710578.0,
      "step": 800
    },
    {
      "epoch": 0.16539050535987748,
      "grad_norm": 13.625336647033691,
      "learning_rate": 9.999760463059657e-06,
      "loss": 1.4973,
      "mean_token_accuracy": 0.6253129422664643,
      "num_tokens": 5782260.0,
      "step": 810
    },
    {
      "epoch": 0.16743236345074017,
      "grad_norm": 15.314433097839355,
      "learning_rate": 9.999691349752154e-06,
      "loss": 1.4774,
      "mean_token_accuracy": 0.6250138610601426,
      "num_tokens": 5853512.0,
      "step": 820
    },
    {
      "epoch": 0.16947422154160285,
      "grad_norm": 14.873124122619629,
      "learning_rate": 9.999613488306182e-06,
      "loss": 1.4189,
      "mean_token_accuracy": 0.621257945895195,
      "num_tokens": 5924369.0,
      "step": 830
    },
    {
      "epoch": 0.17151607963246554,
      "grad_norm": 17.970434188842773,
      "learning_rate": 9.999526878857977e-06,
      "loss": 1.5224,
      "mean_token_accuracy": 0.6051260381937027,
      "num_tokens": 5998837.0,
      "step": 840
    },
    {
      "epoch": 0.17355793772332823,
      "grad_norm": 14.060564041137695,
      "learning_rate": 9.999431521559081e-06,
      "loss": 1.4957,
      "mean_token_accuracy": 0.6203907072544098,
      "num_tokens": 6069353.0,
      "step": 850
    },
    {
      "epoch": 0.1755997958141909,
      "grad_norm": 15.269739151000977,
      "learning_rate": 9.99932741657635e-06,
      "loss": 1.571,
      "mean_token_accuracy": 0.606784787774086,
      "num_tokens": 6142037.0,
      "step": 860
    },
    {
      "epoch": 0.1776416539050536,
      "grad_norm": 13.99073600769043,
      "learning_rate": 9.999214564091934e-06,
      "loss": 1.4143,
      "mean_token_accuracy": 0.630013570189476,
      "num_tokens": 6215390.0,
      "step": 870
    },
    {
      "epoch": 0.17968351199591628,
      "grad_norm": 15.911304473876953,
      "learning_rate": 9.999092964303299e-06,
      "loss": 1.4837,
      "mean_token_accuracy": 0.6139797419309616,
      "num_tokens": 6288877.0,
      "step": 880
    },
    {
      "epoch": 0.18172537008677897,
      "grad_norm": 14.30933666229248,
      "learning_rate": 9.99896261742321e-06,
      "loss": 1.477,
      "mean_token_accuracy": 0.6204395204782486,
      "num_tokens": 6360567.0,
      "step": 890
    },
    {
      "epoch": 0.18376722817764166,
      "grad_norm": 15.878334045410156,
      "learning_rate": 9.998823523679739e-06,
      "loss": 1.4763,
      "mean_token_accuracy": 0.6281387954950333,
      "num_tokens": 6431133.0,
      "step": 900
    },
    {
      "epoch": 0.18580908626850434,
      "grad_norm": 14.967886924743652,
      "learning_rate": 9.998675683316266e-06,
      "loss": 1.486,
      "mean_token_accuracy": 0.6245438635349274,
      "num_tokens": 6502623.0,
      "step": 910
    },
    {
      "epoch": 0.18785094435936703,
      "grad_norm": 16.480722427368164,
      "learning_rate": 9.99851909659147e-06,
      "loss": 1.4937,
      "mean_token_accuracy": 0.625952273607254,
      "num_tokens": 6571994.0,
      "step": 920
    },
    {
      "epoch": 0.18989280245022971,
      "grad_norm": 13.536521911621094,
      "learning_rate": 9.99835376377934e-06,
      "loss": 1.4284,
      "mean_token_accuracy": 0.6304761558771134,
      "num_tokens": 6645698.0,
      "step": 930
    },
    {
      "epoch": 0.1919346605410924,
      "grad_norm": 13.15491008758545,
      "learning_rate": 9.99817968516916e-06,
      "loss": 1.4822,
      "mean_token_accuracy": 0.611976045370102,
      "num_tokens": 6720389.0,
      "step": 940
    },
    {
      "epoch": 0.19397651863195509,
      "grad_norm": 15.53482723236084,
      "learning_rate": 9.997996861065526e-06,
      "loss": 1.4947,
      "mean_token_accuracy": 0.6212883561849594,
      "num_tokens": 6790853.0,
      "step": 950
    },
    {
      "epoch": 0.19601837672281777,
      "grad_norm": 14.705411911010742,
      "learning_rate": 9.99780529178833e-06,
      "loss": 1.489,
      "mean_token_accuracy": 0.6153444766998291,
      "num_tokens": 6867474.0,
      "step": 960
    },
    {
      "epoch": 0.19806023481368046,
      "grad_norm": 12.790360450744629,
      "learning_rate": 9.997604977672766e-06,
      "loss": 1.4949,
      "mean_token_accuracy": 0.6196657717227936,
      "num_tokens": 6936835.0,
      "step": 970
    },
    {
      "epoch": 0.20010209290454314,
      "grad_norm": 13.104793548583984,
      "learning_rate": 9.997395919069333e-06,
      "loss": 1.4875,
      "mean_token_accuracy": 0.6339592069387436,
      "num_tokens": 7002460.0,
      "step": 980
    },
    {
      "epoch": 0.20214395099540583,
      "grad_norm": 14.803412437438965,
      "learning_rate": 9.997178116343827e-06,
      "loss": 1.4656,
      "mean_token_accuracy": 0.6147948563098907,
      "num_tokens": 7077503.0,
      "step": 990
    },
    {
      "epoch": 0.20418580908626852,
      "grad_norm": 13.249740600585938,
      "learning_rate": 9.996951569877347e-06,
      "loss": 1.4248,
      "step": 1000
    },
    {
      "epoch": 0.20418580908626852,
      "eval_loss": 1.4924129247665405,
      "eval_mean_token_accuracy": 0.6186948751602743,
      "eval_num_tokens": 7148003.0,
      "eval_runtime": 989.6753,
      "eval_samples_per_second": 4.996,
      "eval_steps_per_second": 1.249,
      "step": 1000
    },
    {
      "epoch": 0.2062276671771312,
      "grad_norm": 14.814459800720215,
      "learning_rate": 9.996716280066288e-06,
      "loss": 1.5102,
      "mean_token_accuracy": 0.628528282046318,
      "num_tokens": 7219174.0,
      "step": 1010
    },
    {
      "epoch": 0.2082695252679939,
      "grad_norm": 13.832090377807617,
      "learning_rate": 9.996472247322345e-06,
      "loss": 1.4791,
      "mean_token_accuracy": 0.6212698370218277,
      "num_tokens": 7290848.0,
      "step": 1020
    },
    {
      "epoch": 0.21031138335885655,
      "grad_norm": 14.368582725524902,
      "learning_rate": 9.996219472072508e-06,
      "loss": 1.4015,
      "mean_token_accuracy": 0.6343317806720734,
      "num_tokens": 7361488.0,
      "step": 1030
    },
    {
      "epoch": 0.21235324144971923,
      "grad_norm": 14.122415542602539,
      "learning_rate": 9.995957954759073e-06,
      "loss": 1.5137,
      "mean_token_accuracy": 0.6238163977861404,
      "num_tokens": 7432355.0,
      "step": 1040
    },
    {
      "epoch": 0.21439509954058192,
      "grad_norm": 13.9231538772583,
      "learning_rate": 9.995687695839622e-06,
      "loss": 1.5704,
      "mean_token_accuracy": 0.6070802628993988,
      "num_tokens": 7499569.0,
      "step": 1050
    },
    {
      "epoch": 0.2164369576314446,
      "grad_norm": 14.253915786743164,
      "learning_rate": 9.995408695787038e-06,
      "loss": 1.4669,
      "mean_token_accuracy": 0.6284116208553314,
      "num_tokens": 7572280.0,
      "step": 1060
    },
    {
      "epoch": 0.2184788157223073,
      "grad_norm": 15.08125114440918,
      "learning_rate": 9.995120955089496e-06,
      "loss": 1.4687,
      "mean_token_accuracy": 0.6308166861534119,
      "num_tokens": 7644265.0,
      "step": 1070
    },
    {
      "epoch": 0.22052067381316998,
      "grad_norm": 15.171319007873535,
      "learning_rate": 9.994824474250468e-06,
      "loss": 1.4042,
      "mean_token_accuracy": 0.6392420619726181,
      "num_tokens": 7715965.0,
      "step": 1080
    },
    {
      "epoch": 0.22256253190403266,
      "grad_norm": 15.727775573730469,
      "learning_rate": 9.994519253788717e-06,
      "loss": 1.4386,
      "mean_token_accuracy": 0.6239751428365707,
      "num_tokens": 7786521.0,
      "step": 1090
    },
    {
      "epoch": 0.22460438999489535,
      "grad_norm": 13.040878295898438,
      "learning_rate": 9.994205294238297e-06,
      "loss": 1.4687,
      "mean_token_accuracy": 0.6325306296348572,
      "num_tokens": 7856697.0,
      "step": 1100
    },
    {
      "epoch": 0.22664624808575803,
      "grad_norm": 14.698892593383789,
      "learning_rate": 9.993882596148556e-06,
      "loss": 1.4996,
      "mean_token_accuracy": 0.6204117625951767,
      "num_tokens": 7925119.0,
      "step": 1110
    },
    {
      "epoch": 0.22868810617662072,
      "grad_norm": 15.8075590133667,
      "learning_rate": 9.993551160084128e-06,
      "loss": 1.5177,
      "mean_token_accuracy": 0.6046328634023667,
      "num_tokens": 7994808.0,
      "step": 1120
    },
    {
      "epoch": 0.2307299642674834,
      "grad_norm": 13.777670860290527,
      "learning_rate": 9.99321098662494e-06,
      "loss": 1.443,
      "mean_token_accuracy": 0.6219522416591644,
      "num_tokens": 8066587.0,
      "step": 1130
    },
    {
      "epoch": 0.2327718223583461,
      "grad_norm": 14.019599914550781,
      "learning_rate": 9.992862076366203e-06,
      "loss": 1.4232,
      "mean_token_accuracy": 0.6349723309278488,
      "num_tokens": 8140601.0,
      "step": 1140
    },
    {
      "epoch": 0.23481368044920878,
      "grad_norm": 13.211136817932129,
      "learning_rate": 9.992504429918422e-06,
      "loss": 1.3774,
      "mean_token_accuracy": 0.6400786489248276,
      "num_tokens": 8211916.0,
      "step": 1150
    },
    {
      "epoch": 0.23685553854007146,
      "grad_norm": 15.216955184936523,
      "learning_rate": 9.992138047907382e-06,
      "loss": 1.4249,
      "mean_token_accuracy": 0.6270944118499756,
      "num_tokens": 8280425.0,
      "step": 1160
    },
    {
      "epoch": 0.23889739663093415,
      "grad_norm": 15.230463027954102,
      "learning_rate": 9.991762930974154e-06,
      "loss": 1.4987,
      "mean_token_accuracy": 0.6189264446496964,
      "num_tokens": 8348729.0,
      "step": 1170
    },
    {
      "epoch": 0.24093925472179684,
      "grad_norm": 13.841259956359863,
      "learning_rate": 9.991379079775093e-06,
      "loss": 1.4614,
      "mean_token_accuracy": 0.6259691953659058,
      "num_tokens": 8419931.0,
      "step": 1180
    },
    {
      "epoch": 0.24298111281265952,
      "grad_norm": 14.62938117980957,
      "learning_rate": 9.990986494981837e-06,
      "loss": 1.4555,
      "mean_token_accuracy": 0.6304128617048264,
      "num_tokens": 8493325.0,
      "step": 1190
    },
    {
      "epoch": 0.2450229709035222,
      "grad_norm": 14.569291114807129,
      "learning_rate": 9.990585177281312e-06,
      "loss": 1.4874,
      "mean_token_accuracy": 0.621327030658722,
      "num_tokens": 8563202.0,
      "step": 1200
    },
    {
      "epoch": 0.2470648289943849,
      "grad_norm": 14.350664138793945,
      "learning_rate": 9.990175127375708e-06,
      "loss": 1.4037,
      "mean_token_accuracy": 0.6393092036247253,
      "num_tokens": 8636785.0,
      "step": 1210
    },
    {
      "epoch": 0.24910668708524758,
      "grad_norm": 14.5271577835083,
      "learning_rate": 9.989756345982508e-06,
      "loss": 1.3848,
      "mean_token_accuracy": 0.6416534215211869,
      "num_tokens": 8713375.0,
      "step": 1220
    },
    {
      "epoch": 0.25114854517611024,
      "grad_norm": 14.138811111450195,
      "learning_rate": 9.989328833834472e-06,
      "loss": 1.4711,
      "mean_token_accuracy": 0.6263648480176925,
      "num_tokens": 8786119.0,
      "step": 1230
    },
    {
      "epoch": 0.25319040326697295,
      "grad_norm": 11.65623664855957,
      "learning_rate": 9.98889259167963e-06,
      "loss": 1.435,
      "mean_token_accuracy": 0.6322738409042359,
      "num_tokens": 8858592.0,
      "step": 1240
    },
    {
      "epoch": 0.2552322613578356,
      "grad_norm": 15.826696395874023,
      "learning_rate": 9.98844762028129e-06,
      "loss": 1.441,
      "mean_token_accuracy": 0.6359235972166062,
      "num_tokens": 8928180.0,
      "step": 1250
    },
    {
      "epoch": 0.2572741194486983,
      "grad_norm": 17.17336082458496,
      "learning_rate": 9.987993920418036e-06,
      "loss": 1.4623,
      "mean_token_accuracy": 0.6244770854711532,
      "num_tokens": 9001234.0,
      "step": 1260
    },
    {
      "epoch": 0.259315977539561,
      "grad_norm": 14.226313591003418,
      "learning_rate": 9.987531492883722e-06,
      "loss": 1.4608,
      "mean_token_accuracy": 0.6295191615819931,
      "num_tokens": 9077262.0,
      "step": 1270
    },
    {
      "epoch": 0.2613578356304237,
      "grad_norm": 14.853294372558594,
      "learning_rate": 9.987060338487475e-06,
      "loss": 1.4514,
      "mean_token_accuracy": 0.6238030642271042,
      "num_tokens": 9151546.0,
      "step": 1280
    },
    {
      "epoch": 0.26339969372128635,
      "grad_norm": 15.472508430480957,
      "learning_rate": 9.98658045805369e-06,
      "loss": 1.4623,
      "mean_token_accuracy": 0.6118121117353439,
      "num_tokens": 9224771.0,
      "step": 1290
    },
    {
      "epoch": 0.26544155181214907,
      "grad_norm": 14.506217002868652,
      "learning_rate": 9.98609185242203e-06,
      "loss": 1.4837,
      "mean_token_accuracy": 0.6259201139211654,
      "num_tokens": 9296598.0,
      "step": 1300
    },
    {
      "epoch": 0.2674834099030117,
      "grad_norm": 15.796782493591309,
      "learning_rate": 9.985594522447427e-06,
      "loss": 1.4381,
      "mean_token_accuracy": 0.6328779578208923,
      "num_tokens": 9367813.0,
      "step": 1310
    },
    {
      "epoch": 0.26952526799387444,
      "grad_norm": 16.872446060180664,
      "learning_rate": 9.985088469000075e-06,
      "loss": 1.4738,
      "mean_token_accuracy": 0.6319139033555985,
      "num_tokens": 9441995.0,
      "step": 1320
    },
    {
      "epoch": 0.2715671260847371,
      "grad_norm": 12.5946683883667,
      "learning_rate": 9.984573692965439e-06,
      "loss": 1.4395,
      "mean_token_accuracy": 0.6213337570428848,
      "num_tokens": 9512888.0,
      "step": 1330
    },
    {
      "epoch": 0.2736089841755998,
      "grad_norm": 15.489473342895508,
      "learning_rate": 9.984050195244233e-06,
      "loss": 1.4946,
      "mean_token_accuracy": 0.62346653342247,
      "num_tokens": 9583532.0,
      "step": 1340
    },
    {
      "epoch": 0.27565084226646247,
      "grad_norm": 16.15251922607422,
      "learning_rate": 9.983517976752447e-06,
      "loss": 1.5034,
      "mean_token_accuracy": 0.6158888429403305,
      "num_tokens": 9650656.0,
      "step": 1350
    },
    {
      "epoch": 0.2776927003573252,
      "grad_norm": 13.289031982421875,
      "learning_rate": 9.98297703842132e-06,
      "loss": 1.4061,
      "mean_token_accuracy": 0.6388078480958939,
      "num_tokens": 9724038.0,
      "step": 1360
    },
    {
      "epoch": 0.27973455844818784,
      "grad_norm": 14.971853256225586,
      "learning_rate": 9.982427381197349e-06,
      "loss": 1.3735,
      "mean_token_accuracy": 0.6322133481502533,
      "num_tokens": 9792860.0,
      "step": 1370
    },
    {
      "epoch": 0.28177641653905056,
      "grad_norm": 15.422203063964844,
      "learning_rate": 9.981869006042291e-06,
      "loss": 1.4731,
      "mean_token_accuracy": 0.6308643728494644,
      "num_tokens": 9863918.0,
      "step": 1380
    },
    {
      "epoch": 0.2838182746299132,
      "grad_norm": 14.272014617919922,
      "learning_rate": 9.981301913933154e-06,
      "loss": 1.404,
      "mean_token_accuracy": 0.6412104159593582,
      "num_tokens": 9937896.0,
      "step": 1390
    },
    {
      "epoch": 0.28586013272077593,
      "grad_norm": 14.743646621704102,
      "learning_rate": 9.980726105862202e-06,
      "loss": 1.4821,
      "mean_token_accuracy": 0.6105817914009094,
      "num_tokens": 10009650.0,
      "step": 1400
    },
    {
      "epoch": 0.2879019908116386,
      "grad_norm": 13.515847206115723,
      "learning_rate": 9.980141582836944e-06,
      "loss": 1.4952,
      "mean_token_accuracy": 0.6141276836395264,
      "num_tokens": 10080992.0,
      "step": 1410
    },
    {
      "epoch": 0.2899438489025013,
      "grad_norm": 12.919459342956543,
      "learning_rate": 9.979548345880142e-06,
      "loss": 1.445,
      "mean_token_accuracy": 0.6350033283233643,
      "num_tokens": 10152759.0,
      "step": 1420
    },
    {
      "epoch": 0.29198570699336396,
      "grad_norm": 14.708041191101074,
      "learning_rate": 9.978946396029802e-06,
      "loss": 1.45,
      "mean_token_accuracy": 0.6281395435333252,
      "num_tokens": 10222271.0,
      "step": 1430
    },
    {
      "epoch": 0.29402756508422667,
      "grad_norm": 13.272540092468262,
      "learning_rate": 9.97833573433918e-06,
      "loss": 1.428,
      "mean_token_accuracy": 0.6370621621608734,
      "num_tokens": 10294416.0,
      "step": 1440
    },
    {
      "epoch": 0.29606942317508933,
      "grad_norm": 12.510384559631348,
      "learning_rate": 9.97771636187677e-06,
      "loss": 1.4699,
      "mean_token_accuracy": 0.6264015674591065,
      "num_tokens": 10367596.0,
      "step": 1450
    },
    {
      "epoch": 0.29811128126595204,
      "grad_norm": 13.551581382751465,
      "learning_rate": 9.977088279726312e-06,
      "loss": 1.4212,
      "mean_token_accuracy": 0.6329251319169998,
      "num_tokens": 10441159.0,
      "step": 1460
    },
    {
      "epoch": 0.3001531393568147,
      "grad_norm": 19.371631622314453,
      "learning_rate": 9.976451488986785e-06,
      "loss": 1.4853,
      "mean_token_accuracy": 0.6176414400339126,
      "num_tokens": 10507615.0,
      "step": 1470
    },
    {
      "epoch": 0.30219499744767736,
      "grad_norm": 14.074532508850098,
      "learning_rate": 9.975805990772401e-06,
      "loss": 1.3653,
      "mean_token_accuracy": 0.6459196925163269,
      "num_tokens": 10578665.0,
      "step": 1480
    },
    {
      "epoch": 0.3042368555385401,
      "grad_norm": 14.489356994628906,
      "learning_rate": 9.975151786212614e-06,
      "loss": 1.4668,
      "mean_token_accuracy": 0.6257261455059051,
      "num_tokens": 10652077.0,
      "step": 1490
    },
    {
      "epoch": 0.30627871362940273,
      "grad_norm": 13.164527893066406,
      "learning_rate": 9.974488876452107e-06,
      "loss": 1.3998,
      "mean_token_accuracy": 0.6408104240894318,
      "num_tokens": 10723815.0,
      "step": 1500
    },
    {
      "epoch": 0.30832057172026545,
      "grad_norm": 13.87382698059082,
      "learning_rate": 9.973817262650798e-06,
      "loss": 1.464,
      "mean_token_accuracy": 0.6288924962282181,
      "num_tokens": 10794989.0,
      "step": 1510
    },
    {
      "epoch": 0.3103624298111281,
      "grad_norm": 14.771330833435059,
      "learning_rate": 9.973136945983832e-06,
      "loss": 1.378,
      "mean_token_accuracy": 0.6336318165063858,
      "num_tokens": 10867534.0,
      "step": 1520
    },
    {
      "epoch": 0.3124042879019908,
      "grad_norm": 15.911246299743652,
      "learning_rate": 9.972447927641588e-06,
      "loss": 1.4556,
      "mean_token_accuracy": 0.625057402253151,
      "num_tokens": 10936439.0,
      "step": 1530
    },
    {
      "epoch": 0.3144461459928535,
      "grad_norm": 14.86609172821045,
      "learning_rate": 9.971750208829661e-06,
      "loss": 1.4053,
      "mean_token_accuracy": 0.642861407995224,
      "num_tokens": 11005828.0,
      "step": 1540
    },
    {
      "epoch": 0.3164880040837162,
      "grad_norm": 15.707890510559082,
      "learning_rate": 9.971043790768878e-06,
      "loss": 1.4513,
      "mean_token_accuracy": 0.6278130769729614,
      "num_tokens": 11079668.0,
      "step": 1550
    },
    {
      "epoch": 0.31852986217457885,
      "grad_norm": 14.676016807556152,
      "learning_rate": 9.97032867469528e-06,
      "loss": 1.4306,
      "mean_token_accuracy": 0.6367242813110352,
      "num_tokens": 11155772.0,
      "step": 1560
    },
    {
      "epoch": 0.32057172026544156,
      "grad_norm": 13.341631889343262,
      "learning_rate": 9.969604861860133e-06,
      "loss": 1.4878,
      "mean_token_accuracy": 0.6201000392436982,
      "num_tokens": 11227505.0,
      "step": 1570
    },
    {
      "epoch": 0.3226135783563042,
      "grad_norm": 13.484333992004395,
      "learning_rate": 9.968872353529921e-06,
      "loss": 1.4509,
      "mean_token_accuracy": 0.6225223064422607,
      "num_tokens": 11297087.0,
      "step": 1580
    },
    {
      "epoch": 0.32465543644716693,
      "grad_norm": 14.83188533782959,
      "learning_rate": 9.968131150986337e-06,
      "loss": 1.4449,
      "mean_token_accuracy": 0.6267853021621704,
      "num_tokens": 11367600.0,
      "step": 1590
    },
    {
      "epoch": 0.3266972945380296,
      "grad_norm": 14.633095741271973,
      "learning_rate": 9.967381255526291e-06,
      "loss": 1.3779,
      "mean_token_accuracy": 0.6430699110031128,
      "num_tokens": 11440350.0,
      "step": 1600
    },
    {
      "epoch": 0.3287391526288923,
      "grad_norm": 14.898681640625,
      "learning_rate": 9.966622668461899e-06,
      "loss": 1.3796,
      "mean_token_accuracy": 0.6352171957492828,
      "num_tokens": 11508529.0,
      "step": 1610
    },
    {
      "epoch": 0.33078101071975496,
      "grad_norm": 13.815352439880371,
      "learning_rate": 9.965855391120492e-06,
      "loss": 1.4781,
      "mean_token_accuracy": 0.6252012938261032,
      "num_tokens": 11584229.0,
      "step": 1620
    },
    {
      "epoch": 0.3328228688106177,
      "grad_norm": 13.569513320922852,
      "learning_rate": 9.965079424844599e-06,
      "loss": 1.3532,
      "mean_token_accuracy": 0.6486948579549789,
      "num_tokens": 11653163.0,
      "step": 1630
    },
    {
      "epoch": 0.33486472690148034,
      "grad_norm": 14.037538528442383,
      "learning_rate": 9.964294770991958e-06,
      "loss": 1.4509,
      "mean_token_accuracy": 0.6179214477539062,
      "num_tokens": 11722399.0,
      "step": 1640
    },
    {
      "epoch": 0.33690658499234305,
      "grad_norm": 14.93041706085205,
      "learning_rate": 9.963501430935505e-06,
      "loss": 1.4345,
      "mean_token_accuracy": 0.6286086499691009,
      "num_tokens": 11795038.0,
      "step": 1650
    },
    {
      "epoch": 0.3389484430832057,
      "grad_norm": 14.412076950073242,
      "learning_rate": 9.962699406063374e-06,
      "loss": 1.4148,
      "mean_token_accuracy": 0.6350490599870682,
      "num_tokens": 11865144.0,
      "step": 1660
    },
    {
      "epoch": 0.3409903011740684,
      "grad_norm": 13.61820125579834,
      "learning_rate": 9.9618886977789e-06,
      "loss": 1.4613,
      "mean_token_accuracy": 0.6232977896928787,
      "num_tokens": 11936312.0,
      "step": 1670
    },
    {
      "epoch": 0.3430321592649311,
      "grad_norm": 13.12203598022461,
      "learning_rate": 9.961069307500606e-06,
      "loss": 1.3669,
      "mean_token_accuracy": 0.6328762382268905,
      "num_tokens": 12005821.0,
      "step": 1680
    },
    {
      "epoch": 0.3450740173557938,
      "grad_norm": 13.293590545654297,
      "learning_rate": 9.960241236662209e-06,
      "loss": 1.3379,
      "mean_token_accuracy": 0.6402474969625473,
      "num_tokens": 12074453.0,
      "step": 1690
    },
    {
      "epoch": 0.34711587544665645,
      "grad_norm": 14.68148422241211,
      "learning_rate": 9.959404486712613e-06,
      "loss": 1.4089,
      "mean_token_accuracy": 0.6392355740070343,
      "num_tokens": 12142676.0,
      "step": 1700
    },
    {
      "epoch": 0.34915773353751917,
      "grad_norm": 14.123222351074219,
      "learning_rate": 9.958559059115911e-06,
      "loss": 1.4286,
      "mean_token_accuracy": 0.6322161883115769,
      "num_tokens": 12213513.0,
      "step": 1710
    },
    {
      "epoch": 0.3511995916283818,
      "grad_norm": 15.215718269348145,
      "learning_rate": 9.957704955351377e-06,
      "loss": 1.4943,
      "mean_token_accuracy": 0.6219414353370667,
      "num_tokens": 12283923.0,
      "step": 1720
    },
    {
      "epoch": 0.35324144971924454,
      "grad_norm": 13.124506950378418,
      "learning_rate": 9.956842176913467e-06,
      "loss": 1.4513,
      "mean_token_accuracy": 0.6365837067365646,
      "num_tokens": 12358198.0,
      "step": 1730
    },
    {
      "epoch": 0.3552833078101072,
      "grad_norm": 15.533964157104492,
      "learning_rate": 9.955970725311814e-06,
      "loss": 1.4831,
      "mean_token_accuracy": 0.6205825060606003,
      "num_tokens": 12428720.0,
      "step": 1740
    },
    {
      "epoch": 0.3573251659009699,
      "grad_norm": 15.942651748657227,
      "learning_rate": 9.95509060207123e-06,
      "loss": 1.4388,
      "mean_token_accuracy": 0.6281418234109879,
      "num_tokens": 12496646.0,
      "step": 1750
    },
    {
      "epoch": 0.35936702399183257,
      "grad_norm": 11.91575813293457,
      "learning_rate": 9.954201808731698e-06,
      "loss": 1.4251,
      "mean_token_accuracy": 0.624052032828331,
      "num_tokens": 12565166.0,
      "step": 1760
    },
    {
      "epoch": 0.3614088820826952,
      "grad_norm": 14.103416442871094,
      "learning_rate": 9.953304346848372e-06,
      "loss": 1.3823,
      "mean_token_accuracy": 0.6392663180828094,
      "num_tokens": 12636558.0,
      "step": 1770
    },
    {
      "epoch": 0.36345074017355794,
      "grad_norm": 13.818516731262207,
      "learning_rate": 9.952398217991569e-06,
      "loss": 1.4778,
      "mean_token_accuracy": 0.6283582001924515,
      "num_tokens": 12708824.0,
      "step": 1780
    },
    {
      "epoch": 0.3654925982644206,
      "grad_norm": 13.625807762145996,
      "learning_rate": 9.951483423746776e-06,
      "loss": 1.4254,
      "mean_token_accuracy": 0.6290278375148773,
      "num_tokens": 12778732.0,
      "step": 1790
    },
    {
      "epoch": 0.3675344563552833,
      "grad_norm": 14.069265365600586,
      "learning_rate": 9.950559965714647e-06,
      "loss": 1.4622,
      "mean_token_accuracy": 0.6254006132483483,
      "num_tokens": 12848482.0,
      "step": 1800
    },
    {
      "epoch": 0.36957631444614597,
      "grad_norm": 14.092951774597168,
      "learning_rate": 9.949627845510984e-06,
      "loss": 1.4371,
      "mean_token_accuracy": 0.6362253725528717,
      "num_tokens": 12919341.0,
      "step": 1810
    },
    {
      "epoch": 0.3716181725370087,
      "grad_norm": 14.89527702331543,
      "learning_rate": 9.948687064766753e-06,
      "loss": 1.4774,
      "mean_token_accuracy": 0.6274341195821762,
      "num_tokens": 12990953.0,
      "step": 1820
    },
    {
      "epoch": 0.37366003062787134,
      "grad_norm": 15.057639122009277,
      "learning_rate": 9.947737625128068e-06,
      "loss": 1.3877,
      "mean_token_accuracy": 0.6474756151437759,
      "num_tokens": 13061104.0,
      "step": 1830
    },
    {
      "epoch": 0.37570188871873406,
      "grad_norm": 13.421624183654785,
      "learning_rate": 9.946779528256204e-06,
      "loss": 1.4259,
      "mean_token_accuracy": 0.636455500125885,
      "num_tokens": 13132168.0,
      "step": 1840
    },
    {
      "epoch": 0.3777437468095967,
      "grad_norm": 13.868426322937012,
      "learning_rate": 9.945812775827573e-06,
      "loss": 1.4056,
      "mean_token_accuracy": 0.635055473446846,
      "num_tokens": 13202107.0,
      "step": 1850
    },
    {
      "epoch": 0.37978560490045943,
      "grad_norm": 13.789758682250977,
      "learning_rate": 9.944837369533737e-06,
      "loss": 1.4521,
      "mean_token_accuracy": 0.6354493737220764,
      "num_tokens": 13273691.0,
      "step": 1860
    },
    {
      "epoch": 0.3818274629913221,
      "grad_norm": 14.410371780395508,
      "learning_rate": 9.943853311081398e-06,
      "loss": 1.4376,
      "mean_token_accuracy": 0.6387281328439712,
      "num_tokens": 13343207.0,
      "step": 1870
    },
    {
      "epoch": 0.3838693210821848,
      "grad_norm": 13.178431510925293,
      "learning_rate": 9.9428606021924e-06,
      "loss": 1.4425,
      "mean_token_accuracy": 0.6428033947944641,
      "num_tokens": 13413824.0,
      "step": 1880
    },
    {
      "epoch": 0.38591117917304746,
      "grad_norm": 13.29395866394043,
      "learning_rate": 9.941859244603721e-06,
      "loss": 1.469,
      "mean_token_accuracy": 0.6198657006025314,
      "num_tokens": 13484476.0,
      "step": 1890
    },
    {
      "epoch": 0.38795303726391017,
      "grad_norm": 16.564151763916016,
      "learning_rate": 9.940849240067471e-06,
      "loss": 1.4815,
      "mean_token_accuracy": 0.6274239957332611,
      "num_tokens": 13558301.0,
      "step": 1900
    },
    {
      "epoch": 0.38999489535477283,
      "grad_norm": 13.39206600189209,
      "learning_rate": 9.939830590350895e-06,
      "loss": 1.4434,
      "mean_token_accuracy": 0.6292670220136642,
      "num_tokens": 13628067.0,
      "step": 1910
    },
    {
      "epoch": 0.39203675344563554,
      "grad_norm": 12.907182693481445,
      "learning_rate": 9.938803297236354e-06,
      "loss": 1.3603,
      "mean_token_accuracy": 0.6486239820718765,
      "num_tokens": 13701747.0,
      "step": 1920
    },
    {
      "epoch": 0.3940786115364982,
      "grad_norm": 14.168998718261719,
      "learning_rate": 9.937767362521343e-06,
      "loss": 1.4568,
      "mean_token_accuracy": 0.6249212712049484,
      "num_tokens": 13772760.0,
      "step": 1930
    },
    {
      "epoch": 0.3961204696273609,
      "grad_norm": 12.438258171081543,
      "learning_rate": 9.936722788018476e-06,
      "loss": 1.3811,
      "mean_token_accuracy": 0.6337268084287644,
      "num_tokens": 13842794.0,
      "step": 1940
    },
    {
      "epoch": 0.3981623277182236,
      "grad_norm": 14.198771476745605,
      "learning_rate": 9.935669575555478e-06,
      "loss": 1.4326,
      "mean_token_accuracy": 0.6176476001739502,
      "num_tokens": 13916581.0,
      "step": 1950
    },
    {
      "epoch": 0.4002041858090863,
      "grad_norm": 15.927424430847168,
      "learning_rate": 9.934607726975196e-06,
      "loss": 1.3843,
      "mean_token_accuracy": 0.645603460073471,
      "num_tokens": 13990796.0,
      "step": 1960
    },
    {
      "epoch": 0.40224604389994895,
      "grad_norm": 13.675074577331543,
      "learning_rate": 9.933537244135584e-06,
      "loss": 1.4744,
      "mean_token_accuracy": 0.6207163512706757,
      "num_tokens": 14058770.0,
      "step": 1970
    },
    {
      "epoch": 0.40428790199081166,
      "grad_norm": 14.225303649902344,
      "learning_rate": 9.9324581289097e-06,
      "loss": 1.3557,
      "mean_token_accuracy": 0.6434049308300018,
      "num_tokens": 14127780.0,
      "step": 1980
    },
    {
      "epoch": 0.4063297600816743,
      "grad_norm": 13.11447811126709,
      "learning_rate": 9.931370383185717e-06,
      "loss": 1.4163,
      "mean_token_accuracy": 0.6322284698486328,
      "num_tokens": 14199180.0,
      "step": 1990
    },
    {
      "epoch": 0.40837161817253703,
      "grad_norm": 13.91115951538086,
      "learning_rate": 9.930274008866898e-06,
      "loss": 1.4281,
      "step": 2000
    },
    {
      "epoch": 0.40837161817253703,
      "eval_loss": 1.4562746286392212,
      "eval_mean_token_accuracy": 0.626390626877064,
      "eval_num_tokens": 14272984.0,
      "eval_runtime": 992.5565,
      "eval_samples_per_second": 4.981,
      "eval_steps_per_second": 1.245,
      "step": 2000
    },
    {
      "epoch": 0.4104134762633997,
      "grad_norm": 12.996975898742676,
      "learning_rate": 9.92916900787161e-06,
      "loss": 1.4825,
      "mean_token_accuracy": 0.6286307215690613,
      "num_tokens": 14345206.0,
      "step": 2010
    },
    {
      "epoch": 0.4124553343542624,
      "grad_norm": 14.120156288146973,
      "learning_rate": 9.928055382133311e-06,
      "loss": 1.5034,
      "mean_token_accuracy": 0.6243163645267487,
      "num_tokens": 14419062.0,
      "step": 2020
    },
    {
      "epoch": 0.41449719244512506,
      "grad_norm": 13.214719772338867,
      "learning_rate": 9.926933133600554e-06,
      "loss": 1.4539,
      "mean_token_accuracy": 0.6286552488803864,
      "num_tokens": 14490124.0,
      "step": 2030
    },
    {
      "epoch": 0.4165390505359878,
      "grad_norm": 14.170852661132812,
      "learning_rate": 9.925802264236974e-06,
      "loss": 1.437,
      "mean_token_accuracy": 0.6257667273283005,
      "num_tokens": 14561490.0,
      "step": 2040
    },
    {
      "epoch": 0.41858090862685043,
      "grad_norm": 13.008140563964844,
      "learning_rate": 9.924662776021297e-06,
      "loss": 1.4631,
      "mean_token_accuracy": 0.6202176958322525,
      "num_tokens": 14634587.0,
      "step": 2050
    },
    {
      "epoch": 0.4206227667177131,
      "grad_norm": 12.924283027648926,
      "learning_rate": 9.923514670947326e-06,
      "loss": 1.4184,
      "mean_token_accuracy": 0.6351789355278015,
      "num_tokens": 14704346.0,
      "step": 2060
    },
    {
      "epoch": 0.4226646248085758,
      "grad_norm": 15.573639869689941,
      "learning_rate": 9.922357951023939e-06,
      "loss": 1.401,
      "mean_token_accuracy": 0.62881398499012,
      "num_tokens": 14775676.0,
      "step": 2070
    },
    {
      "epoch": 0.42470648289943846,
      "grad_norm": 13.74057388305664,
      "learning_rate": 9.92119261827509e-06,
      "loss": 1.392,
      "mean_token_accuracy": 0.6317652136087417,
      "num_tokens": 14846972.0,
      "step": 2080
    },
    {
      "epoch": 0.4267483409903012,
      "grad_norm": 17.23077964782715,
      "learning_rate": 9.920018674739808e-06,
      "loss": 1.4939,
      "mean_token_accuracy": 0.6289480417966843,
      "num_tokens": 14921965.0,
      "step": 2090
    },
    {
      "epoch": 0.42879019908116384,
      "grad_norm": 14.922390937805176,
      "learning_rate": 9.918836122472178e-06,
      "loss": 1.4451,
      "mean_token_accuracy": 0.6285185784101486,
      "num_tokens": 14993212.0,
      "step": 2100
    },
    {
      "epoch": 0.43083205717202655,
      "grad_norm": 12.999784469604492,
      "learning_rate": 9.917644963541359e-06,
      "loss": 1.402,
      "mean_token_accuracy": 0.6414052009582519,
      "num_tokens": 15062067.0,
      "step": 2110
    },
    {
      "epoch": 0.4328739152628892,
      "grad_norm": 14.179859161376953,
      "learning_rate": 9.916445200031559e-06,
      "loss": 1.4342,
      "mean_token_accuracy": 0.6401284962892533,
      "num_tokens": 15134303.0,
      "step": 2120
    },
    {
      "epoch": 0.4349157733537519,
      "grad_norm": 13.935656547546387,
      "learning_rate": 9.915236834042052e-06,
      "loss": 1.4358,
      "mean_token_accuracy": 0.6319669187068939,
      "num_tokens": 15205632.0,
      "step": 2130
    },
    {
      "epoch": 0.4369576314446146,
      "grad_norm": 14.011245727539062,
      "learning_rate": 9.914019867687157e-06,
      "loss": 1.4334,
      "mean_token_accuracy": 0.6271157622337341,
      "num_tokens": 15276462.0,
      "step": 2140
    },
    {
      "epoch": 0.4389994895354773,
      "grad_norm": 15.309188842773438,
      "learning_rate": 9.912794303096244e-06,
      "loss": 1.4595,
      "mean_token_accuracy": 0.6256224781274795,
      "num_tokens": 15347114.0,
      "step": 2150
    },
    {
      "epoch": 0.44104134762633995,
      "grad_norm": 12.190061569213867,
      "learning_rate": 9.911560142413725e-06,
      "loss": 1.3799,
      "mean_token_accuracy": 0.6491726189851761,
      "num_tokens": 15422516.0,
      "step": 2160
    },
    {
      "epoch": 0.44308320571720267,
      "grad_norm": 14.980606079101562,
      "learning_rate": 9.910317387799058e-06,
      "loss": 1.3266,
      "mean_token_accuracy": 0.6502772122621536,
      "num_tokens": 15498472.0,
      "step": 2170
    },
    {
      "epoch": 0.4451250638080653,
      "grad_norm": 14.175018310546875,
      "learning_rate": 9.909066041426733e-06,
      "loss": 1.4373,
      "mean_token_accuracy": 0.6358803600072861,
      "num_tokens": 15566017.0,
      "step": 2180
    },
    {
      "epoch": 0.44716692189892804,
      "grad_norm": 12.999357223510742,
      "learning_rate": 9.907806105486278e-06,
      "loss": 1.4096,
      "mean_token_accuracy": 0.6316717475652694,
      "num_tokens": 15639585.0,
      "step": 2190
    },
    {
      "epoch": 0.4492087799897907,
      "grad_norm": 12.991114616394043,
      "learning_rate": 9.906537582182245e-06,
      "loss": 1.4713,
      "mean_token_accuracy": 0.6206406474113464,
      "num_tokens": 15713767.0,
      "step": 2200
    },
    {
      "epoch": 0.4512506380806534,
      "grad_norm": 16.122135162353516,
      "learning_rate": 9.905260473734215e-06,
      "loss": 1.3514,
      "mean_token_accuracy": 0.6418723970651626,
      "num_tokens": 15786377.0,
      "step": 2210
    },
    {
      "epoch": 0.45329249617151607,
      "grad_norm": 15.44117259979248,
      "learning_rate": 9.903974782376792e-06,
      "loss": 1.408,
      "mean_token_accuracy": 0.6281966835260391,
      "num_tokens": 15854713.0,
      "step": 2220
    },
    {
      "epoch": 0.4553343542623788,
      "grad_norm": 13.286972045898438,
      "learning_rate": 9.902680510359595e-06,
      "loss": 1.4129,
      "mean_token_accuracy": 0.6257620751857758,
      "num_tokens": 15929629.0,
      "step": 2230
    },
    {
      "epoch": 0.45737621235324144,
      "grad_norm": 13.401151657104492,
      "learning_rate": 9.901377659947257e-06,
      "loss": 1.4552,
      "mean_token_accuracy": 0.6300186216831207,
      "num_tokens": 15998948.0,
      "step": 2240
    },
    {
      "epoch": 0.45941807044410415,
      "grad_norm": 14.887855529785156,
      "learning_rate": 9.900066233419423e-06,
      "loss": 1.4324,
      "mean_token_accuracy": 0.6363310009241104,
      "num_tokens": 16068487.0,
      "step": 2250
    },
    {
      "epoch": 0.4614599285349668,
      "grad_norm": 15.288812637329102,
      "learning_rate": 9.898746233070744e-06,
      "loss": 1.5303,
      "mean_token_accuracy": 0.6137059539556503,
      "num_tokens": 16139564.0,
      "step": 2260
    },
    {
      "epoch": 0.4635017866258295,
      "grad_norm": 13.743548393249512,
      "learning_rate": 9.897417661210869e-06,
      "loss": 1.3417,
      "mean_token_accuracy": 0.6564855366945267,
      "num_tokens": 16205885.0,
      "step": 2270
    },
    {
      "epoch": 0.4655436447166922,
      "grad_norm": 13.705702781677246,
      "learning_rate": 9.89608052016445e-06,
      "loss": 1.4518,
      "mean_token_accuracy": 0.6158504068851471,
      "num_tokens": 16277491.0,
      "step": 2280
    },
    {
      "epoch": 0.4675855028075549,
      "grad_norm": 13.374293327331543,
      "learning_rate": 9.894734812271128e-06,
      "loss": 1.4323,
      "mean_token_accuracy": 0.6367385804653167,
      "num_tokens": 16349026.0,
      "step": 2290
    },
    {
      "epoch": 0.46962736089841756,
      "grad_norm": 14.050286293029785,
      "learning_rate": 9.89338053988554e-06,
      "loss": 1.3748,
      "mean_token_accuracy": 0.644825917482376,
      "num_tokens": 16419614.0,
      "step": 2300
    },
    {
      "epoch": 0.47166921898928027,
      "grad_norm": 12.7362060546875,
      "learning_rate": 9.8920177053773e-06,
      "loss": 1.3568,
      "mean_token_accuracy": 0.6447948306798935,
      "num_tokens": 16487160.0,
      "step": 2310
    },
    {
      "epoch": 0.47371107708014293,
| "grad_norm": 13.328147888183594, | |
| "learning_rate": 9.890646311131011e-06, | |
| "loss": 1.4306, | |
| "mean_token_accuracy": 0.616517773270607, | |
| "num_tokens": 16560163.0, | |
| "step": 2320 | |
| }, | |
| { | |
| "epoch": 0.47575293517100564, | |
| "grad_norm": 14.35744571685791, | |
| "learning_rate": 9.88926635954625e-06, | |
| "loss": 1.4315, | |
| "mean_token_accuracy": 0.6324984401464462, | |
| "num_tokens": 16633229.0, | |
| "step": 2330 | |
| }, | |
| { | |
| "epoch": 0.4777947932618683, | |
| "grad_norm": 14.388071060180664, | |
| "learning_rate": 9.887877853037568e-06, | |
| "loss": 1.4924, | |
| "mean_token_accuracy": 0.6160824686288834, | |
| "num_tokens": 16703383.0, | |
| "step": 2340 | |
| }, | |
| { | |
| "epoch": 0.47983665135273096, | |
| "grad_norm": 13.642940521240234, | |
| "learning_rate": 9.886480794034485e-06, | |
| "loss": 1.4676, | |
| "mean_token_accuracy": 0.6276875615119935, | |
| "num_tokens": 16778245.0, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 0.48187850944359367, | |
| "grad_norm": 14.308442115783691, | |
| "learning_rate": 9.885075184981484e-06, | |
| "loss": 1.3762, | |
| "mean_token_accuracy": 0.6397514760494232, | |
| "num_tokens": 16851438.0, | |
| "step": 2360 | |
| }, | |
| { | |
| "epoch": 0.48392036753445633, | |
| "grad_norm": 15.114699363708496, | |
| "learning_rate": 9.883661028338009e-06, | |
| "loss": 1.4637, | |
| "mean_token_accuracy": 0.6210479766130448, | |
| "num_tokens": 16921239.0, | |
| "step": 2370 | |
| }, | |
| { | |
| "epoch": 0.48596222562531904, | |
| "grad_norm": 16.276016235351562, | |
| "learning_rate": 9.88223832657846e-06, | |
| "loss": 1.4999, | |
| "mean_token_accuracy": 0.6109961330890655, | |
| "num_tokens": 16993286.0, | |
| "step": 2380 | |
| }, | |
| { | |
| "epoch": 0.4880040837161817, | |
| "grad_norm": 16.859468460083008, | |
| "learning_rate": 9.88080708219219e-06, | |
| "loss": 1.4623, | |
| "mean_token_accuracy": 0.6310494840145111, | |
| "num_tokens": 17066589.0, | |
| "step": 2390 | |
| }, | |
| { | |
| "epoch": 0.4900459418070444, | |
| "grad_norm": 15.107973098754883, | |
| "learning_rate": 9.879367297683499e-06, | |
| "loss": 1.4906, | |
| "mean_token_accuracy": 0.6145053148269654, | |
| "num_tokens": 17137188.0, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 0.4920877998979071, | |
| "grad_norm": 12.906387329101562, | |
| "learning_rate": 9.877918975571627e-06, | |
| "loss": 1.3995, | |
| "mean_token_accuracy": 0.6356921285390854, | |
| "num_tokens": 17205087.0, | |
| "step": 2410 | |
| }, | |
| { | |
| "epoch": 0.4941296579887698, | |
| "grad_norm": 13.7039213180542, | |
| "learning_rate": 9.876462118390758e-06, | |
| "loss": 1.35, | |
| "mean_token_accuracy": 0.642990505695343, | |
| "num_tokens": 17279430.0, | |
| "step": 2420 | |
| }, | |
| { | |
| "epoch": 0.49617151607963245, | |
| "grad_norm": 13.48718547821045, | |
| "learning_rate": 9.874996728690005e-06, | |
| "loss": 1.4008, | |
| "mean_token_accuracy": 0.6324507027864457, | |
| "num_tokens": 17345739.0, | |
| "step": 2430 | |
| }, | |
| { | |
| "epoch": 0.49821337417049516, | |
| "grad_norm": 13.046002388000488, | |
| "learning_rate": 9.873522809033414e-06, | |
| "loss": 1.4148, | |
| "mean_token_accuracy": 0.6250188022851944, | |
| "num_tokens": 17415957.0, | |
| "step": 2440 | |
| }, | |
| { | |
| "epoch": 0.5002552322613578, | |
| "grad_norm": 12.571805953979492, | |
| "learning_rate": 9.872040361999953e-06, | |
| "loss": 1.3033, | |
| "mean_token_accuracy": 0.6560938596725464, | |
| "num_tokens": 17488686.0, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 0.5022970903522205, | |
| "grad_norm": 13.674259185791016, | |
| "learning_rate": 9.870549390183513e-06, | |
| "loss": 1.3663, | |
| "mean_token_accuracy": 0.6375747203826905, | |
| "num_tokens": 17564148.0, | |
| "step": 2460 | |
| }, | |
| { | |
| "epoch": 0.5043389484430832, | |
| "grad_norm": 13.26236343383789, | |
| "learning_rate": 9.869049896192902e-06, | |
| "loss": 1.4163, | |
| "mean_token_accuracy": 0.6320332467556, | |
| "num_tokens": 17635358.0, | |
| "step": 2470 | |
| }, | |
| { | |
| "epoch": 0.5063808065339459, | |
| "grad_norm": 13.495912551879883, | |
| "learning_rate": 9.867541882651839e-06, | |
| "loss": 1.4292, | |
| "mean_token_accuracy": 0.6307185232639313, | |
| "num_tokens": 17708919.0, | |
| "step": 2480 | |
| }, | |
| { | |
| "epoch": 0.5084226646248086, | |
| "grad_norm": 15.353795051574707, | |
| "learning_rate": 9.866025352198944e-06, | |
| "loss": 1.4209, | |
| "mean_token_accuracy": 0.6328317284584045, | |
| "num_tokens": 17784820.0, | |
| "step": 2490 | |
| }, | |
| { | |
| "epoch": 0.5104645227156712, | |
| "grad_norm": 14.236472129821777, | |
| "learning_rate": 9.864500307487751e-06, | |
| "loss": 1.3915, | |
| "mean_token_accuracy": 0.629261365532875, | |
| "num_tokens": 17857549.0, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 0.512506380806534, | |
| "grad_norm": 13.635055541992188, | |
| "learning_rate": 9.86296675118668e-06, | |
| "loss": 1.4043, | |
| "mean_token_accuracy": 0.6366223454475403, | |
| "num_tokens": 17927963.0, | |
| "step": 2510 | |
| }, | |
| { | |
| "epoch": 0.5145482388973966, | |
| "grad_norm": 13.6681547164917, | |
| "learning_rate": 9.861424685979054e-06, | |
| "loss": 1.439, | |
| "mean_token_accuracy": 0.6203552514314652, | |
| "num_tokens": 18001509.0, | |
| "step": 2520 | |
| }, | |
| { | |
| "epoch": 0.5165900969882593, | |
| "grad_norm": 12.835921287536621, | |
| "learning_rate": 9.859874114563075e-06, | |
| "loss": 1.3936, | |
| "mean_token_accuracy": 0.6366373240947724, | |
| "num_tokens": 18072963.0, | |
| "step": 2530 | |
| }, | |
| { | |
| "epoch": 0.518631955079122, | |
| "grad_norm": 13.316462516784668, | |
| "learning_rate": 9.858315039651836e-06, | |
| "loss": 1.3831, | |
| "mean_token_accuracy": 0.6388492465019227, | |
| "num_tokens": 18142046.0, | |
| "step": 2540 | |
| }, | |
| { | |
| "epoch": 0.5206738131699847, | |
| "grad_norm": 12.890396118164062, | |
| "learning_rate": 9.856747463973303e-06, | |
| "loss": 1.5356, | |
| "mean_token_accuracy": 0.620140865445137, | |
| "num_tokens": 18212201.0, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 0.5227156712608474, | |
| "grad_norm": 12.411418914794922, | |
| "learning_rate": 9.855171390270325e-06, | |
| "loss": 1.3723, | |
| "mean_token_accuracy": 0.6438209116458893, | |
| "num_tokens": 18283060.0, | |
| "step": 2560 | |
| }, | |
| { | |
| "epoch": 0.52475752935171, | |
| "grad_norm": 13.509264945983887, | |
| "learning_rate": 9.853586821300608e-06, | |
| "loss": 1.429, | |
| "mean_token_accuracy": 0.6353526145219803, | |
| "num_tokens": 18356527.0, | |
| "step": 2570 | |
| }, | |
| { | |
| "epoch": 0.5267993874425727, | |
| "grad_norm": 13.333746910095215, | |
| "learning_rate": 9.851993759836734e-06, | |
| "loss": 1.4291, | |
| "mean_token_accuracy": 0.6351711452007294, | |
| "num_tokens": 18426397.0, | |
| "step": 2580 | |
| }, | |
| { | |
| "epoch": 0.5288412455334355, | |
| "grad_norm": 13.60482120513916, | |
| "learning_rate": 9.850392208666134e-06, | |
| "loss": 1.3642, | |
| "mean_token_accuracy": 0.6275977313518524, | |
| "num_tokens": 18500416.0, | |
| "step": 2590 | |
| }, | |
| { | |
| "epoch": 0.5308831036242981, | |
| "grad_norm": 13.40274429321289, | |
| "learning_rate": 9.848782170591103e-06, | |
| "loss": 1.4108, | |
| "mean_token_accuracy": 0.6340374439954758, | |
| "num_tokens": 18573354.0, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 0.5329249617151608, | |
| "grad_norm": 13.70960521697998, | |
| "learning_rate": 9.847163648428783e-06, | |
| "loss": 1.3876, | |
| "mean_token_accuracy": 0.6381645023822784, | |
| "num_tokens": 18642953.0, | |
| "step": 2610 | |
| }, | |
| { | |
| "epoch": 0.5349668198060235, | |
| "grad_norm": 13.797345161437988, | |
| "learning_rate": 9.845536645011157e-06, | |
| "loss": 1.3801, | |
| "mean_token_accuracy": 0.6419808328151703, | |
| "num_tokens": 18719168.0, | |
| "step": 2620 | |
| }, | |
| { | |
| "epoch": 0.5370086778968862, | |
| "grad_norm": 13.198974609375, | |
| "learning_rate": 9.843901163185055e-06, | |
| "loss": 1.4107, | |
| "mean_token_accuracy": 0.6292454957962036, | |
| "num_tokens": 18791570.0, | |
| "step": 2630 | |
| }, | |
| { | |
| "epoch": 0.5390505359877489, | |
| "grad_norm": 13.272690773010254, | |
| "learning_rate": 9.842257205812133e-06, | |
| "loss": 1.3883, | |
| "mean_token_accuracy": 0.642322301864624, | |
| "num_tokens": 18862900.0, | |
| "step": 2640 | |
| }, | |
| { | |
| "epoch": 0.5410923940786115, | |
| "grad_norm": 12.19058895111084, | |
| "learning_rate": 9.840604775768885e-06, | |
| "loss": 1.371, | |
| "mean_token_accuracy": 0.6479323506355286, | |
| "num_tokens": 18932883.0, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 0.5431342521694742, | |
| "grad_norm": 13.2040376663208, | |
| "learning_rate": 9.838943875946628e-06, | |
| "loss": 1.4196, | |
| "mean_token_accuracy": 0.6349193304777145, | |
| "num_tokens": 19004875.0, | |
| "step": 2660 | |
| }, | |
| { | |
| "epoch": 0.5451761102603369, | |
| "grad_norm": 13.854543685913086, | |
| "learning_rate": 9.837274509251494e-06, | |
| "loss": 1.4093, | |
| "mean_token_accuracy": 0.6391417652368545, | |
| "num_tokens": 19076124.0, | |
| "step": 2670 | |
| }, | |
| { | |
| "epoch": 0.5472179683511996, | |
| "grad_norm": 13.256423950195312, | |
| "learning_rate": 9.835596678604435e-06, | |
| "loss": 1.3879, | |
| "mean_token_accuracy": 0.6399724900722503, | |
| "num_tokens": 19149584.0, | |
| "step": 2680 | |
| }, | |
| { | |
| "epoch": 0.5492598264420623, | |
| "grad_norm": 13.840933799743652, | |
| "learning_rate": 9.833910386941213e-06, | |
| "loss": 1.433, | |
| "mean_token_accuracy": 0.6246784031391144, | |
| "num_tokens": 19221353.0, | |
| "step": 2690 | |
| }, | |
| { | |
| "epoch": 0.5513016845329249, | |
| "grad_norm": 12.89402961730957, | |
| "learning_rate": 9.83221563721239e-06, | |
| "loss": 1.443, | |
| "mean_token_accuracy": 0.635868039727211, | |
| "num_tokens": 19295418.0, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 0.5533435426237876, | |
| "grad_norm": 12.286266326904297, | |
| "learning_rate": 9.830512432383333e-06, | |
| "loss": 1.4191, | |
| "mean_token_accuracy": 0.6380386412143707, | |
| "num_tokens": 19370379.0, | |
| "step": 2710 | |
| }, | |
| { | |
| "epoch": 0.5553854007146504, | |
| "grad_norm": 14.805827140808105, | |
| "learning_rate": 9.828800775434197e-06, | |
| "loss": 1.3819, | |
| "mean_token_accuracy": 0.6385849773883819, | |
| "num_tokens": 19441372.0, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 0.557427258805513, | |
| "grad_norm": 13.304326057434082, | |
| "learning_rate": 9.827080669359933e-06, | |
| "loss": 1.4274, | |
| "mean_token_accuracy": 0.6263547629117966, | |
| "num_tokens": 19515686.0, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 0.5594691168963757, | |
| "grad_norm": 13.437885284423828, | |
| "learning_rate": 9.825352117170269e-06, | |
| "loss": 1.3906, | |
| "mean_token_accuracy": 0.648447448015213, | |
| "num_tokens": 19584899.0, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 0.5615109749872383, | |
| "grad_norm": 12.786911964416504, | |
| "learning_rate": 9.823615121889716e-06, | |
| "loss": 1.3985, | |
| "mean_token_accuracy": 0.6387988567352295, | |
| "num_tokens": 19657466.0, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 0.5635528330781011, | |
| "grad_norm": 13.658928871154785, | |
| "learning_rate": 9.821869686557555e-06, | |
| "loss": 1.3936, | |
| "mean_token_accuracy": 0.6408439576625824, | |
| "num_tokens": 19725251.0, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 0.5655946911689638, | |
| "grad_norm": 12.969337463378906, | |
| "learning_rate": 9.82011581422784e-06, | |
| "loss": 1.431, | |
| "mean_token_accuracy": 0.628329211473465, | |
| "num_tokens": 19793131.0, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 0.5676365492598264, | |
| "grad_norm": 12.977407455444336, | |
| "learning_rate": 9.818353507969379e-06, | |
| "loss": 1.4193, | |
| "mean_token_accuracy": 0.6275966912508011, | |
| "num_tokens": 19869081.0, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 0.5696784073506891, | |
| "grad_norm": 12.836411476135254, | |
| "learning_rate": 9.816582770865747e-06, | |
| "loss": 1.3117, | |
| "mean_token_accuracy": 0.6492154330015183, | |
| "num_tokens": 19939561.0, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 0.5717202654415519, | |
| "grad_norm": 13.496788024902344, | |
| "learning_rate": 9.814803606015263e-06, | |
| "loss": 1.394, | |
| "mean_token_accuracy": 0.6368442118167877, | |
| "num_tokens": 20008753.0, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 0.5737621235324145, | |
| "grad_norm": 14.3073148727417, | |
| "learning_rate": 9.813016016530996e-06, | |
| "loss": 1.4656, | |
| "mean_token_accuracy": 0.6255601584911347, | |
| "num_tokens": 20079759.0, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 0.5758039816232772, | |
| "grad_norm": 13.646188735961914, | |
| "learning_rate": 9.811220005540756e-06, | |
| "loss": 1.4906, | |
| "mean_token_accuracy": 0.6181686758995056, | |
| "num_tokens": 20150221.0, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 0.5778458397141398, | |
| "grad_norm": 14.640626907348633, | |
| "learning_rate": 9.809415576187087e-06, | |
| "loss": 1.4371, | |
| "mean_token_accuracy": 0.6324725329875946, | |
| "num_tokens": 20221920.0, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 0.5798876978050026, | |
| "grad_norm": 15.234823226928711, | |
| "learning_rate": 9.807602731627262e-06, | |
| "loss": 1.4532, | |
| "mean_token_accuracy": 0.6349549174308777, | |
| "num_tokens": 20292653.0, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 0.5819295558958653, | |
| "grad_norm": 13.222195625305176, | |
| "learning_rate": 9.805781475033282e-06, | |
| "loss": 1.4338, | |
| "mean_token_accuracy": 0.6166985005140304, | |
| "num_tokens": 20366340.0, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 0.5839714139867279, | |
| "grad_norm": 14.15178394317627, | |
| "learning_rate": 9.803951809591866e-06, | |
| "loss": 1.4232, | |
| "mean_token_accuracy": 0.6322162270545959, | |
| "num_tokens": 20432957.0, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 0.5860132720775906, | |
| "grad_norm": 12.758091926574707, | |
| "learning_rate": 9.80211373850444e-06, | |
| "loss": 1.4142, | |
| "mean_token_accuracy": 0.6438927322626113, | |
| "num_tokens": 20505606.0, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 0.5880551301684533, | |
| "grad_norm": 13.361724853515625, | |
| "learning_rate": 9.800267264987147e-06, | |
| "loss": 1.3776, | |
| "mean_token_accuracy": 0.6391714990139008, | |
| "num_tokens": 20579466.0, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 0.590096988259316, | |
| "grad_norm": 13.651033401489258, | |
| "learning_rate": 9.798412392270825e-06, | |
| "loss": 1.3711, | |
| "mean_token_accuracy": 0.6416450500488281, | |
| "num_tokens": 20652379.0, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 0.5921388463501787, | |
| "grad_norm": 14.636039733886719, | |
| "learning_rate": 9.796549123601012e-06, | |
| "loss": 1.3782, | |
| "mean_token_accuracy": 0.6453828096389771, | |
| "num_tokens": 20722216.0, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 0.5941807044410413, | |
| "grad_norm": 13.31446361541748, | |
| "learning_rate": 9.794677462237936e-06, | |
| "loss": 1.4271, | |
| "mean_token_accuracy": 0.6281617492437362, | |
| "num_tokens": 20794125.0, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 0.5962225625319041, | |
| "grad_norm": 14.600592613220215, | |
| "learning_rate": 9.792797411456509e-06, | |
| "loss": 1.476, | |
| "mean_token_accuracy": 0.6172799646854401, | |
| "num_tokens": 20863211.0, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 0.5982644206227667, | |
| "grad_norm": 14.365285873413086, | |
| "learning_rate": 9.790908974546326e-06, | |
| "loss": 1.4591, | |
| "mean_token_accuracy": 0.631415992975235, | |
| "num_tokens": 20933317.0, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 0.6003062787136294, | |
| "grad_norm": 13.793408393859863, | |
| "learning_rate": 9.789012154811648e-06, | |
| "loss": 1.4121, | |
| "mean_token_accuracy": 0.6378588736057281, | |
| "num_tokens": 21002698.0, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 0.6023481368044921, | |
| "grad_norm": 12.982183456420898, | |
| "learning_rate": 9.78710695557141e-06, | |
| "loss": 1.3554, | |
| "mean_token_accuracy": 0.6380349576473237, | |
| "num_tokens": 21074284.0, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 0.6043899948953547, | |
| "grad_norm": 14.370036125183105, | |
| "learning_rate": 9.785193380159208e-06, | |
| "loss": 1.4423, | |
| "mean_token_accuracy": 0.6347444444894791, | |
| "num_tokens": 21148349.0, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 0.6064318529862175, | |
| "grad_norm": 12.563482284545898, | |
| "learning_rate": 9.783271431923293e-06, | |
| "loss": 1.3354, | |
| "mean_token_accuracy": 0.6468933254480362, | |
| "num_tokens": 21219321.0, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 0.6084737110770801, | |
| "grad_norm": 15.911494255065918, | |
| "learning_rate": 9.781341114226565e-06, | |
| "loss": 1.426, | |
| "mean_token_accuracy": 0.6219086617231369, | |
| "num_tokens": 21291472.0, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 0.6105155691679428, | |
| "grad_norm": 14.266389846801758, | |
| "learning_rate": 9.779402430446572e-06, | |
| "loss": 1.3806, | |
| "mean_token_accuracy": 0.6449051052331924, | |
| "num_tokens": 21363629.0, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 0.6125574272588055, | |
| "grad_norm": 12.48147964477539, | |
| "learning_rate": 9.777455383975496e-06, | |
| "loss": 1.4112, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.6125574272588055, | |
| "eval_loss": 1.440710425376892, | |
| "eval_mean_token_accuracy": 0.6296637463097048, | |
| "eval_num_tokens": 21437325.0, | |
| "eval_runtime": 991.1208, | |
| "eval_samples_per_second": 4.988, | |
| "eval_steps_per_second": 1.247, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 0.6145992853496682, | |
| "grad_norm": 14.174776077270508, | |
| "learning_rate": 9.775499978220155e-06, | |
| "loss": 1.4097, | |
| "mean_token_accuracy": 0.6289676755666733, | |
| "num_tokens": 21512874.0, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 0.6166411434405309, | |
| "grad_norm": 14.071956634521484, | |
| "learning_rate": 9.773536216601994e-06, | |
| "loss": 1.4198, | |
| "mean_token_accuracy": 0.6249706447124481, | |
| "num_tokens": 21586472.0, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 0.6186830015313936, | |
| "grad_norm": 12.335860252380371, | |
| "learning_rate": 9.771564102557072e-06, | |
| "loss": 1.4183, | |
| "mean_token_accuracy": 0.632024022936821, | |
| "num_tokens": 21657572.0, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 0.6207248596222562, | |
| "grad_norm": 13.708938598632812, | |
| "learning_rate": 9.769583639536071e-06, | |
| "loss": 1.4158, | |
| "mean_token_accuracy": 0.6340501457452774, | |
| "num_tokens": 21729103.0, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 0.622766717713119, | |
| "grad_norm": 12.857871055603027, | |
| "learning_rate": 9.767594831004277e-06, | |
| "loss": 1.4024, | |
| "mean_token_accuracy": 0.6416067212820054, | |
| "num_tokens": 21805653.0, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 0.6248085758039816, | |
| "grad_norm": 15.368263244628906, | |
| "learning_rate": 9.76559768044158e-06, | |
| "loss": 1.3593, | |
| "mean_token_accuracy": 0.6511017084121704, | |
| "num_tokens": 21878088.0, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 0.6268504338948443, | |
| "grad_norm": 13.683640480041504, | |
| "learning_rate": 9.763592191342467e-06, | |
| "loss": 1.4604, | |
| "mean_token_accuracy": 0.6294127464294433, | |
| "num_tokens": 21947896.0, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 0.628892291985707, | |
| "grad_norm": 12.823150634765625, | |
| "learning_rate": 9.761578367216008e-06, | |
| "loss": 1.4111, | |
| "mean_token_accuracy": 0.6234809100627899, | |
| "num_tokens": 22020392.0, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 0.6309341500765697, | |
| "grad_norm": 14.795945167541504, | |
| "learning_rate": 9.75955621158587e-06, | |
| "loss": 1.3956, | |
| "mean_token_accuracy": 0.6330976217985154, | |
| "num_tokens": 22088766.0, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 0.6329760081674324, | |
| "grad_norm": 13.43422794342041, | |
| "learning_rate": 9.757525727990286e-06, | |
| "loss": 1.4851, | |
| "mean_token_accuracy": 0.6163936108350754, | |
| "num_tokens": 22158579.0, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 0.635017866258295, | |
| "grad_norm": 11.95371150970459, | |
| "learning_rate": 9.755486919982067e-06, | |
| "loss": 1.4307, | |
| "mean_token_accuracy": 0.6282651394605636, | |
| "num_tokens": 22226235.0, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 0.6370597243491577, | |
| "grad_norm": 12.942562103271484, | |
| "learning_rate": 9.75343979112859e-06, | |
| "loss": 1.3729, | |
| "mean_token_accuracy": 0.640126034617424, | |
| "num_tokens": 22295629.0, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 0.6391015824400205, | |
| "grad_norm": 15.593914031982422, | |
| "learning_rate": 9.751384345011787e-06, | |
| "loss": 1.442, | |
| "mean_token_accuracy": 0.6314625293016434, | |
| "num_tokens": 22365531.0, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 0.6411434405308831, | |
| "grad_norm": 14.191858291625977, | |
| "learning_rate": 9.749320585228148e-06, | |
| "loss": 1.4015, | |
| "mean_token_accuracy": 0.6351157635450363, | |
| "num_tokens": 22433566.0, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 0.6431852986217458, | |
| "grad_norm": 15.005651473999023, | |
| "learning_rate": 9.747248515388705e-06, | |
| "loss": 1.3679, | |
| "mean_token_accuracy": 0.646827119588852, | |
| "num_tokens": 22505167.0, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 0.6452271567126084, | |
| "grad_norm": 13.515469551086426, | |
| "learning_rate": 9.745168139119033e-06, | |
| "loss": 1.4565, | |
| "mean_token_accuracy": 0.6296883016824723, | |
| "num_tokens": 22574602.0, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 0.6472690148034712, | |
| "grad_norm": 12.68270206451416, | |
| "learning_rate": 9.743079460059241e-06, | |
| "loss": 1.3882, | |
| "mean_token_accuracy": 0.6341635257005691, | |
| "num_tokens": 22646932.0, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 0.6493108728943339, | |
| "grad_norm": 14.113992691040039, | |
| "learning_rate": 9.740982481863967e-06, | |
| "loss": 1.473, | |
| "mean_token_accuracy": 0.6364928364753724, | |
| "num_tokens": 22718533.0, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 0.6513527309851965, | |
| "grad_norm": 11.577704429626465, | |
| "learning_rate": 9.738877208202363e-06, | |
| "loss": 1.4147, | |
| "mean_token_accuracy": 0.6371136277914047, | |
| "num_tokens": 22790566.0, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 0.6533945890760592, | |
| "grad_norm": 12.993800163269043, | |
| "learning_rate": 9.73676364275811e-06, | |
| "loss": 1.3988, | |
| "mean_token_accuracy": 0.636861988902092, | |
| "num_tokens": 22853994.0, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 0.655436447166922, | |
| "grad_norm": 12.155184745788574, | |
| "learning_rate": 9.734641789229381e-06, | |
| "loss": 1.3366, | |
| "mean_token_accuracy": 0.6418772786855698, | |
| "num_tokens": 22925430.0, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 0.6574783052577846, | |
| "grad_norm": 15.805740356445312, | |
| "learning_rate": 9.732511651328864e-06, | |
| "loss": 1.3588, | |
| "mean_token_accuracy": 0.6364259988069534, | |
| "num_tokens": 22997748.0, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 0.6595201633486473, | |
| "grad_norm": 13.210399627685547, | |
| "learning_rate": 9.730373232783735e-06, | |
| "loss": 1.3825, | |
| "mean_token_accuracy": 0.6286744445562362, | |
| "num_tokens": 23069478.0, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 0.6615620214395099, | |
| "grad_norm": 13.883077621459961, | |
| "learning_rate": 9.72822653733566e-06, | |
| "loss": 1.3418, | |
| "mean_token_accuracy": 0.6431337684392929, | |
| "num_tokens": 23137148.0, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 0.6636038795303726, | |
| "grad_norm": 12.698500633239746, | |
| "learning_rate": 9.726071568740793e-06, | |
| "loss": 1.4307, | |
| "mean_token_accuracy": 0.634193229675293, | |
| "num_tokens": 23212626.0, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 0.6656457376212354, | |
| "grad_norm": 12.851318359375, | |
| "learning_rate": 9.723908330769756e-06, | |
| "loss": 1.3716, | |
| "mean_token_accuracy": 0.6405917227268219, | |
| "num_tokens": 23284143.0, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 0.667687595712098, | |
| "grad_norm": 13.096170425415039, | |
| "learning_rate": 9.721736827207644e-06, | |
| "loss": 1.3898, | |
| "mean_token_accuracy": 0.6286759912967682, | |
| "num_tokens": 23357005.0, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 0.6697294538029607, | |
| "grad_norm": 12.506603240966797, | |
| "learning_rate": 9.719557061854017e-06, | |
| "loss": 1.3495, | |
| "mean_token_accuracy": 0.6530344665050507, | |
| "num_tokens": 23431394.0, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 0.6717713118938233, | |
| "grad_norm": 13.819788932800293, | |
| "learning_rate": 9.717369038522884e-06, | |
| "loss": 1.3564, | |
| "mean_token_accuracy": 0.6386108338832855, | |
| "num_tokens": 23504003.0, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 0.6738131699846861, | |
| "grad_norm": 13.408661842346191, | |
| "learning_rate": 9.715172761042711e-06, | |
| "loss": 1.3723, | |
| "mean_token_accuracy": 0.628694823384285, | |
| "num_tokens": 23577951.0, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 0.6758550280755488, | |
| "grad_norm": 13.23100757598877, | |
| "learning_rate": 9.712968233256401e-06, | |
| "loss": 1.4124, | |
| "mean_token_accuracy": 0.6347943753004074, | |
| "num_tokens": 23647422.0, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 0.6778968861664114, | |
| "grad_norm": 14.13464641571045, | |
| "learning_rate": 9.710755459021297e-06, | |
| "loss": 1.3851, | |
| "mean_token_accuracy": 0.6412581115961075, | |
| "num_tokens": 23719685.0, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 0.6799387442572741, | |
| "grad_norm": 13.437444686889648, | |
| "learning_rate": 9.708534442209168e-06, | |
| "loss": 1.3589, | |
| "mean_token_accuracy": 0.6525840878486633, | |
| "num_tokens": 23792553.0, | |
| "step": 3330 | |
| }, | |
| { | |
| "epoch": 0.6819806023481368, | |
| "grad_norm": 14.053814888000488, | |
| "learning_rate": 9.706305186706207e-06, | |
| "loss": 1.4502, | |
| "mean_token_accuracy": 0.626780903339386, | |
| "num_tokens": 23866377.0, | |
| "step": 3340 | |
| }, | |
| { | |
| "epoch": 0.6840224604389995, | |
| "grad_norm": 13.260232925415039, | |
| "learning_rate": 9.70406769641302e-06, | |
| "loss": 1.3332, | |
| "mean_token_accuracy": 0.6482997745275497, | |
| "num_tokens": 23936488.0, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 0.6860643185298622, | |
| "grad_norm": 13.664006233215332, | |
| "learning_rate": 9.701821975244625e-06, | |
| "loss": 1.4597, | |
| "mean_token_accuracy": 0.6262546479701996, | |
| "num_tokens": 24009134.0, | |
| "step": 3360 | |
| }, | |
| { | |
| "epoch": 0.6881061766207248, | |
| "grad_norm": 13.392569541931152, | |
| "learning_rate": 9.699568027130442e-06, | |
| "loss": 1.3125, | |
| "mean_token_accuracy": 0.6487543761730195, | |
| "num_tokens": 24080618.0, | |
| "step": 3370 | |
| }, | |
| { | |
| "epoch": 0.6901480347115876, | |
| "grad_norm": 12.633986473083496, | |
| "learning_rate": 9.697305856014281e-06, | |
| "loss": 1.3968, | |
| "mean_token_accuracy": 0.6403874427080154, | |
| "num_tokens": 24154567.0, | |
| "step": 3380 | |
| }, | |
| { | |
| "epoch": 0.6921898928024502, | |
| "grad_norm": 14.033143997192383, | |
| "learning_rate": 9.695035465854348e-06, | |
| "loss": 1.4009, | |
| "mean_token_accuracy": 0.6387769609689713, | |
| "num_tokens": 24221960.0, | |
| "step": 3390 | |
| }, | |
| { | |
| "epoch": 0.6942317508933129, | |
| "grad_norm": 13.039753913879395, | |
| "learning_rate": 9.692756860623221e-06, | |
| "loss": 1.3506, | |
| "mean_token_accuracy": 0.6354148983955383, | |
| "num_tokens": 24292743.0, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 0.6962736089841756, | |
| "grad_norm": 11.965189933776855, | |
| "learning_rate": 9.690470044307861e-06, | |
| "loss": 1.4325, | |
| "mean_token_accuracy": 0.6360249876976013, | |
| "num_tokens": 24362477.0, | |
| "step": 3410 | |
| }, | |
| { | |
| "epoch": 0.6983154670750383, | |
| "grad_norm": 12.434362411499023, | |
| "learning_rate": 9.688175020909589e-06, | |
| "loss": 1.3562, | |
| "mean_token_accuracy": 0.6378575742244721, | |
| "num_tokens": 24432920.0, | |
| "step": 3420 | |
| }, | |
| { | |
| "epoch": 0.700357325165901, | |
| "grad_norm": 12.607736587524414, | |
| "learning_rate": 9.685871794444092e-06, | |
| "loss": 1.3255, | |
| "mean_token_accuracy": 0.646689772605896, | |
| "num_tokens": 24503177.0, | |
| "step": 3430 | |
| }, | |
| { | |
| "epoch": 0.7023991832567636, | |
| "grad_norm": 12.687234878540039, | |
| "learning_rate": 9.683560368941406e-06, | |
| "loss": 1.3244, | |
| "mean_token_accuracy": 0.6440610259771347, | |
| "num_tokens": 24573187.0, | |
| "step": 3440 | |
| }, | |
| { | |
| "epoch": 0.7044410413476263, | |
| "grad_norm": 13.45374584197998, | |
| "learning_rate": 9.681240748445914e-06, | |
| "loss": 1.3499, | |
| "mean_token_accuracy": 0.6462150484323501, | |
| "num_tokens": 24646749.0, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 0.7064828994384891, | |
| "grad_norm": 13.070556640625, | |
| "learning_rate": 9.678912937016343e-06, | |
| "loss": 1.4068, | |
| "mean_token_accuracy": 0.6340273886919021, | |
| "num_tokens": 24716266.0, | |
| "step": 3460 | |
| }, | |
| { | |
| "epoch": 0.7085247575293517, | |
| "grad_norm": 13.016335487365723, | |
| "learning_rate": 9.676576938725742e-06, | |
| "loss": 1.4276, | |
| "mean_token_accuracy": 0.6312612593173981, | |
| "num_tokens": 24782591.0, | |
| "step": 3470 | |
| }, | |
| { | |
| "epoch": 0.7105666156202144, | |
| "grad_norm": 12.000085830688477, | |
| "learning_rate": 9.674232757661495e-06, | |
| "loss": 1.4557, | |
| "mean_token_accuracy": 0.6277955800294877, | |
| "num_tokens": 24855167.0, | |
| "step": 3480 | |
| }, | |
| { | |
| "epoch": 0.712608473711077, | |
| "grad_norm": 13.089092254638672, | |
| "learning_rate": 9.671880397925296e-06, | |
| "loss": 1.3836, | |
| "mean_token_accuracy": 0.644837275147438, | |
| "num_tokens": 24929959.0, | |
| "step": 3490 | |
| }, | |
| { | |
| "epoch": 0.7146503318019398, | |
| "grad_norm": 12.519915580749512, | |
| "learning_rate": 9.669519863633155e-06, | |
| "loss": 1.4579, | |
| "mean_token_accuracy": 0.6203823953866958, | |
| "num_tokens": 25001204.0, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 0.7166921898928025, | |
| "grad_norm": 13.97362232208252, | |
| "learning_rate": 9.667151158915382e-06, | |
| "loss": 1.3887, | |
| "mean_token_accuracy": 0.6337443590164185, | |
| "num_tokens": 25069613.0, | |
| "step": 3510 | |
| }, | |
| { | |
| "epoch": 0.7187340479836651, | |
| "grad_norm": 14.413997650146484, | |
| "learning_rate": 9.664774287916587e-06, | |
| "loss": 1.4879, | |
| "mean_token_accuracy": 0.6237174719572067, | |
| "num_tokens": 25140521.0, | |
| "step": 3520 | |
| }, | |
| { | |
| "epoch": 0.7207759060745278, | |
| "grad_norm": 14.139748573303223, | |
| "learning_rate": 9.662389254795661e-06, | |
| "loss": 1.3745, | |
| "mean_token_accuracy": 0.6430779010057449, | |
| "num_tokens": 25213201.0, | |
| "step": 3530 | |
| }, | |
| { | |
| "epoch": 0.7228177641653905, | |
| "grad_norm": 13.43658447265625, | |
| "learning_rate": 9.659996063725787e-06, | |
| "loss": 1.3662, | |
| "mean_token_accuracy": 0.6530295521020889, | |
| "num_tokens": 25284506.0, | |
| "step": 3540 | |
| }, | |
| { | |
| "epoch": 0.7248596222562532, | |
| "grad_norm": 13.512331008911133, | |
| "learning_rate": 9.657594718894414e-06, | |
| "loss": 1.4379, | |
| "mean_token_accuracy": 0.6244717538356781, | |
| "num_tokens": 25355630.0, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 0.7269014803471159, | |
| "grad_norm": 12.112098693847656, | |
| "learning_rate": 9.65518522450326e-06, | |
| "loss": 1.4099, | |
| "mean_token_accuracy": 0.6363851726055145, | |
| "num_tokens": 25429960.0, | |
| "step": 3560 | |
| }, | |
| { | |
| "epoch": 0.7289433384379785, | |
| "grad_norm": 12.75960922241211, | |
| "learning_rate": 9.652767584768304e-06, | |
| "loss": 1.4137, | |
| "mean_token_accuracy": 0.6329428285360337, | |
| "num_tokens": 25501298.0, | |
| "step": 3570 | |
| }, | |
| { | |
| "epoch": 0.7309851965288412, | |
| "grad_norm": 13.585481643676758, | |
| "learning_rate": 9.650341803919779e-06, | |
| "loss": 1.3472, | |
| "mean_token_accuracy": 0.6461682111024857, | |
| "num_tokens": 25573820.0, | |
| "step": 3580 | |
| }, | |
| { | |
| "epoch": 0.733027054619704, | |
| "grad_norm": 12.37596607208252, | |
| "learning_rate": 9.647907886202157e-06, | |
| "loss": 1.3985, | |
| "mean_token_accuracy": 0.6342933237552643, | |
| "num_tokens": 25645428.0, | |
| "step": 3590 | |
| }, | |
| { | |
| "epoch": 0.7350689127105666, | |
| "grad_norm": 13.346282958984375, | |
| "learning_rate": 9.645465835874152e-06, | |
| "loss": 1.3366, | |
| "mean_token_accuracy": 0.6504071831703186, | |
| "num_tokens": 25717166.0, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 0.7371107708014293, | |
| "grad_norm": 13.590423583984375, | |
| "learning_rate": 9.643015657208708e-06, | |
| "loss": 1.4408, | |
| "mean_token_accuracy": 0.6356685668230057, | |
| "num_tokens": 25786912.0, | |
| "step": 3610 | |
| }, | |
| { | |
| "epoch": 0.7391526288922919, | |
| "grad_norm": 13.47347640991211, | |
| "learning_rate": 9.640557354492988e-06, | |
| "loss": 1.4635, | |
| "mean_token_accuracy": 0.6147109150886536, | |
| "num_tokens": 25856128.0, | |
| "step": 3620 | |
| }, | |
| { | |
| "epoch": 0.7411944869831547, | |
| "grad_norm": 13.709653854370117, | |
| "learning_rate": 9.638090932028373e-06, | |
| "loss": 1.3778, | |
| "mean_token_accuracy": 0.6359743535518646, | |
| "num_tokens": 25927390.0, | |
| "step": 3630 | |
| }, | |
| { | |
| "epoch": 0.7432363450740174, | |
| "grad_norm": 11.859482765197754, | |
| "learning_rate": 9.63561639413045e-06, | |
| "loss": 1.3549, | |
| "mean_token_accuracy": 0.6486242324113846, | |
| "num_tokens": 26002907.0, | |
| "step": 3640 | |
| }, | |
| { | |
| "epoch": 0.74527820316488, | |
| "grad_norm": 12.219711303710938, | |
| "learning_rate": 9.633133745129008e-06, | |
| "loss": 1.3967, | |
| "mean_token_accuracy": 0.6251788079738617, | |
| "num_tokens": 26073293.0, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 0.7473200612557427, | |
| "grad_norm": 13.043457984924316, | |
| "learning_rate": 9.630642989368024e-06, | |
| "loss": 1.4081, | |
| "mean_token_accuracy": 0.6368646204471589, | |
| "num_tokens": 26145821.0, | |
| "step": 3660 | |
| }, | |
| { | |
| "epoch": 0.7493619193466055, | |
| "grad_norm": 14.711187362670898, | |
| "learning_rate": 9.628144131205665e-06, | |
| "loss": 1.446, | |
| "mean_token_accuracy": 0.6285335540771484, | |
| "num_tokens": 26213680.0, | |
| "step": 3670 | |
| }, | |
| { | |
| "epoch": 0.7514037774374681, | |
| "grad_norm": 12.913311958312988, | |
| "learning_rate": 9.62563717501427e-06, | |
| "loss": 1.4466, | |
| "mean_token_accuracy": 0.6235233783721924, | |
| "num_tokens": 26284742.0, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.7534456355283308, | |
| "grad_norm": 12.046110153198242, | |
| "learning_rate": 9.623122125180351e-06, | |
| "loss": 1.4176, | |
| "mean_token_accuracy": 0.643735259771347, | |
| "num_tokens": 26354716.0, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.7554874936191934, | |
| "grad_norm": 13.422168731689453, | |
| "learning_rate": 9.620598986104578e-06, | |
| "loss": 1.4649, | |
| "mean_token_accuracy": 0.6326315462589264, | |
| "num_tokens": 26426125.0, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.7575293517100562, | |
| "grad_norm": 12.690073013305664, | |
| "learning_rate": 9.618067762201784e-06, | |
| "loss": 1.4224, | |
| "mean_token_accuracy": 0.6294170558452606, | |
| "num_tokens": 26497872.0, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.7595712098009189, | |
| "grad_norm": 12.08823013305664, | |
| "learning_rate": 9.615528457900934e-06, | |
| "loss": 1.3856, | |
| "mean_token_accuracy": 0.6507549166679383, | |
| "num_tokens": 26568238.0, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.7616130678917815, | |
| "grad_norm": 14.470535278320312, | |
| "learning_rate": 9.612981077645144e-06, | |
| "loss": 1.3912, | |
| "mean_token_accuracy": 0.6367805600166321, | |
| "num_tokens": 26635873.0, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.7636549259826442, | |
| "grad_norm": 13.872713088989258, | |
| "learning_rate": 9.610425625891656e-06, | |
| "loss": 1.4196, | |
| "mean_token_accuracy": 0.6281679064035416, | |
| "num_tokens": 26708209.0, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.7656967840735069, | |
| "grad_norm": 13.715387344360352, | |
| "learning_rate": 9.607862107111834e-06, | |
| "loss": 1.373, | |
| "mean_token_accuracy": 0.6392459809780121, | |
| "num_tokens": 26781659.0, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.7677386421643696, | |
| "grad_norm": 13.774560928344727, | |
| "learning_rate": 9.605290525791157e-06, | |
| "loss": 1.3897, | |
| "mean_token_accuracy": 0.6378937274217605, | |
| "num_tokens": 26853155.0, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.7697805002552323, | |
| "grad_norm": 12.794147491455078, | |
| "learning_rate": 9.602710886429218e-06, | |
| "loss": 1.3528, | |
| "mean_token_accuracy": 0.6442150235176086, | |
| "num_tokens": 26920655.0, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.7718223583460949, | |
| "grad_norm": 12.719574928283691, | |
| "learning_rate": 9.600123193539698e-06, | |
| "loss": 1.3502, | |
| "mean_token_accuracy": 0.6572590112686157, | |
| "num_tokens": 26992809.0, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.7738642164369577, | |
| "grad_norm": 13.51803970336914, | |
| "learning_rate": 9.597527451650376e-06, | |
| "loss": 1.3243, | |
| "mean_token_accuracy": 0.6524654120206833, | |
| "num_tokens": 27066434.0, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.7759060745278203, | |
| "grad_norm": 14.247166633605957, | |
| "learning_rate": 9.59492366530312e-06, | |
| "loss": 1.4145, | |
| "mean_token_accuracy": 0.6389972150325776, | |
| "num_tokens": 27139751.0, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.777947932618683, | |
| "grad_norm": 13.419672012329102, | |
| "learning_rate": 9.592311839053863e-06, | |
| "loss": 1.3721, | |
| "mean_token_accuracy": 0.6519317954778672, | |
| "num_tokens": 27213247.0, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.7799897907095457, | |
| "grad_norm": 13.287151336669922, | |
| "learning_rate": 9.589691977472612e-06, | |
| "loss": 1.3991, | |
| "mean_token_accuracy": 0.6346599578857421, | |
| "num_tokens": 27288707.0, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.7820316488004083, | |
| "grad_norm": 12.613758087158203, | |
| "learning_rate": 9.587064085143432e-06, | |
| "loss": 1.4093, | |
| "mean_token_accuracy": 0.6332722306251526, | |
| "num_tokens": 27364833.0, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.7840735068912711, | |
| "grad_norm": 13.21430778503418, | |
| "learning_rate": 9.584428166664441e-06, | |
| "loss": 1.3291, | |
| "mean_token_accuracy": 0.6426202535629273, | |
| "num_tokens": 27439232.0, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.7861153649821337, | |
| "grad_norm": 12.770293235778809, | |
| "learning_rate": 9.581784226647799e-06, | |
| "loss": 1.422, | |
| "mean_token_accuracy": 0.6253783494234085, | |
| "num_tokens": 27508261.0, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.7881572230729964, | |
| "grad_norm": 14.6603422164917, | |
| "learning_rate": 9.5791322697197e-06, | |
| "loss": 1.3018, | |
| "mean_token_accuracy": 0.6558138817548752, | |
| "num_tokens": 27579262.0, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.7901990811638591, | |
| "grad_norm": 13.182867050170898, | |
| "learning_rate": 9.576472300520372e-06, | |
| "loss": 1.3363, | |
| "mean_token_accuracy": 0.6500471413135529, | |
| "num_tokens": 27654478.0, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.7922409392547218, | |
| "grad_norm": 13.26388931274414, | |
| "learning_rate": 9.573804323704057e-06, | |
| "loss": 1.3474, | |
| "mean_token_accuracy": 0.6410001933574676, | |
| "num_tokens": 27729450.0, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.7942827973455845, | |
| "grad_norm": 13.69534969329834, | |
| "learning_rate": 9.571128343939006e-06, | |
| "loss": 1.407, | |
| "mean_token_accuracy": 0.6288270473480224, | |
| "num_tokens": 27800068.0, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.7963246554364471, | |
| "grad_norm": 12.891340255737305, | |
| "learning_rate": 9.56844436590748e-06, | |
| "loss": 1.2993, | |
| "mean_token_accuracy": 0.6599480360746384, | |
| "num_tokens": 27872575.0, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.7983665135273098, | |
| "grad_norm": 13.020922660827637, | |
| "learning_rate": 9.565752394305729e-06, | |
| "loss": 1.4082, | |
| "mean_token_accuracy": 0.6357982963323593, | |
| "num_tokens": 27944873.0, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.8004083716181726, | |
| "grad_norm": 13.040590286254883, | |
| "learning_rate": 9.563052433843993e-06, | |
| "loss": 1.3802, | |
| "mean_token_accuracy": 0.6361476242542267, | |
| "num_tokens": 28016338.0, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 0.8024502297090352, | |
| "grad_norm": 13.284208297729492, | |
| "learning_rate": 9.560344489246489e-06, | |
| "loss": 1.409, | |
| "mean_token_accuracy": 0.6426727443933486, | |
| "num_tokens": 28088927.0, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 0.8044920877998979, | |
| "grad_norm": 13.533629417419434, | |
| "learning_rate": 9.557628565251404e-06, | |
| "loss": 1.3227, | |
| "mean_token_accuracy": 0.6476307839155198, | |
| "num_tokens": 28159259.0, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 0.8065339458907606, | |
| "grad_norm": 16.405038833618164, | |
| "learning_rate": 9.554904666610889e-06, | |
| "loss": 1.4397, | |
| "mean_token_accuracy": 0.6269133925437927, | |
| "num_tokens": 28231271.0, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.8085758039816233, | |
| "grad_norm": 13.900541305541992, | |
| "learning_rate": 9.552172798091045e-06, | |
| "loss": 1.4283, | |
| "mean_token_accuracy": 0.6322760939598083, | |
| "num_tokens": 28301168.0, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 0.810617662072486, | |
| "grad_norm": 13.871532440185547, | |
| "learning_rate": 9.549432964471919e-06, | |
| "loss": 1.4501, | |
| "mean_token_accuracy": 0.6328884690999985, | |
| "num_tokens": 28379814.0, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 0.8126595201633486, | |
| "grad_norm": 12.453668594360352, | |
| "learning_rate": 9.546685170547496e-06, | |
| "loss": 1.3755, | |
| "mean_token_accuracy": 0.6485854834318161, | |
| "num_tokens": 28453322.0, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 0.8147013782542113, | |
| "grad_norm": 12.559453010559082, | |
| "learning_rate": 9.54392942112569e-06, | |
| "loss": 1.3336, | |
| "mean_token_accuracy": 0.6534389823675155, | |
| "num_tokens": 28523506.0, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 0.8167432363450741, | |
| "grad_norm": 11.901576042175293, | |
| "learning_rate": 9.541165721028337e-06, | |
| "loss": 1.3697, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 0.8167432363450741, | |
| "eval_loss": 1.430210828781128, | |
| "eval_mean_token_accuracy": 0.6311295317021774, | |
| "eval_num_tokens": 28595276.0, | |
| "eval_runtime": 994.082, | |
| "eval_samples_per_second": 4.973, | |
| "eval_steps_per_second": 1.243, | |
| "step": 4000 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 24485, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 1000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.8128782228832911e+18, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
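
The object above is the `trainer_state.json` that the Hugging Face `Trainer` writes into each saved checkpoint directory: `log_history` interleaves dense training entries (one per `logging_steps` = 10 steps, carrying `loss`) with sparse evaluation entries (carrying `eval_loss`, here at steps 3000 and 4000). A minimal sketch of how one might plot both curves from this file — the path is a placeholder, and `matplotlib` is an assumed dependency; the keys (`log_history`, `step`, `loss`, `eval_loss`) come straight from the state shown above:

```python
import json

import matplotlib.pyplot as plt

# Hypothetical location; the HF Trainer writes trainer_state.json
# into every checkpoint-* directory it saves.
STATE_PATH = "path/to/checkpoint/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss"
# instead, so filtering on those keys separates the two streams.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], "o-", label="eval loss")

# Mark the lowest eval loss seen so far (1.4302 at step 4000 above).
best = min(eval_logs, key=lambda e: e["eval_loss"])
plt.axvline(best["step"], linestyle="--",
            label=f"best eval_loss {best['eval_loss']:.4f} @ step {best['step']}")

plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.tight_layout()
plt.savefig("loss_curve.png")
```

Since `max_steps` is 24485 and this state ends at step 4000 (`save_steps` = 1000), the run was checkpointed well before completion; passing the checkpoint directory to `Trainer.train(resume_from_checkpoint=...)` would restore this state, including the optimizer step count and the `TrainerControl` flags recorded under `stateful_callbacks`.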