{
  "best_global_step": 3000,
  "best_metric": 3.602764652887152,
  "best_model_checkpoint": "checkpoints/gpt-2_seq1024_mla192-0-0/checkpoint-3000",
  "epoch": 0.9042272624519629,
  "eval_steps": 300,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006028181749679753,
      "grad_norm": 2.0171725749969482,
      "learning_rate": 3.166666666666667e-05,
      "loss": 10.4028,
      "step": 20
    },
    {
      "epoch": 0.012056363499359506,
      "grad_norm": 1.66342031955719,
      "learning_rate": 6.500000000000001e-05,
      "loss": 9.2471,
      "step": 40
    },
    {
      "epoch": 0.01808454524903926,
      "grad_norm": 0.7821094989776611,
      "learning_rate": 9.833333333333333e-05,
      "loss": 8.2622,
      "step": 60
    },
    {
      "epoch": 0.02411272699871901,
      "grad_norm": 0.41097572445869446,
      "learning_rate": 0.00013166666666666665,
      "loss": 7.55,
      "step": 80
    },
    {
      "epoch": 0.030140908748398764,
      "grad_norm": 0.5691717863082886,
      "learning_rate": 0.000165,
      "loss": 7.2211,
      "step": 100
    },
    {
      "epoch": 0.03616909049807852,
      "grad_norm": 0.7314554452896118,
      "learning_rate": 0.00019833333333333335,
      "loss": 6.9285,
      "step": 120
    },
    {
      "epoch": 0.04219727224775827,
      "grad_norm": 1.0012000799179077,
      "learning_rate": 0.00023166666666666667,
      "loss": 6.6558,
      "step": 140
    },
    {
      "epoch": 0.04822545399743802,
      "grad_norm": 1.0241062641143799,
      "learning_rate": 0.00026500000000000004,
      "loss": 6.4608,
      "step": 160
    },
    {
      "epoch": 0.05425363574711778,
      "grad_norm": 0.9315752983093262,
      "learning_rate": 0.00029833333333333334,
      "loss": 6.2917,
      "step": 180
    },
    {
      "epoch": 0.06028181749679753,
      "grad_norm": 0.8718670606613159,
      "learning_rate": 0.0003316666666666667,
      "loss": 6.134,
      "step": 200
    },
    {
      "epoch": 0.06630999924647728,
      "grad_norm": 0.7919574975967407,
      "learning_rate": 0.000365,
      "loss": 5.9921,
      "step": 220
    },
    {
      "epoch": 0.07233818099615703,
      "grad_norm": 0.46674615144729614,
      "learning_rate": 0.00039833333333333333,
      "loss": 5.854,
      "step": 240
    },
    {
      "epoch": 0.07836636274583679,
      "grad_norm": 0.6191166043281555,
      "learning_rate": 0.0004316666666666667,
      "loss": 5.7443,
      "step": 260
    },
    {
      "epoch": 0.08439454449551655,
      "grad_norm": 0.6590830087661743,
      "learning_rate": 0.000465,
      "loss": 5.6087,
      "step": 280
    },
    {
      "epoch": 0.09042272624519629,
      "grad_norm": 1.0058612823486328,
      "learning_rate": 0.0004983333333333334,
      "loss": 5.5362,
      "step": 300
    },
    {
      "epoch": 0.09042272624519629,
      "eval_loss": 5.480001850152933,
      "eval_perplexity": 239.84715112775478,
      "eval_runtime": 112.6058,
      "eval_samples_per_second": 14.733,
      "eval_steps_per_second": 0.231,
      "step": 300
    },
    {
      "epoch": 0.09645090799487605,
      "grad_norm": 0.4994821846485138,
      "learning_rate": 0.0004964814814814814,
      "loss": 5.4092,
      "step": 320
    },
    {
      "epoch": 0.1024790897445558,
      "grad_norm": 0.4082713723182678,
      "learning_rate": 0.0004927777777777777,
      "loss": 5.3019,
      "step": 340
    },
    {
      "epoch": 0.10850727149423556,
      "grad_norm": 0.6187137961387634,
      "learning_rate": 0.0004890740740740741,
      "loss": 5.2061,
      "step": 360
    },
    {
      "epoch": 0.1145354532439153,
      "grad_norm": 0.9009650349617004,
      "learning_rate": 0.0004853703703703704,
      "loss": 5.1354,
      "step": 380
    },
    {
      "epoch": 0.12056363499359506,
      "grad_norm": 0.35602667927742004,
      "learning_rate": 0.0004816666666666667,
      "loss": 5.061,
      "step": 400
    },
    {
      "epoch": 0.1265918167432748,
      "grad_norm": 0.4166870713233948,
      "learning_rate": 0.00047796296296296297,
      "loss": 4.9749,
      "step": 420
    },
    {
      "epoch": 0.13261999849295456,
      "grad_norm": 0.5855613946914673,
      "learning_rate": 0.0004742592592592593,
      "loss": 4.9244,
      "step": 440
    },
    {
      "epoch": 0.1386481802426343,
      "grad_norm": 0.44319576025009155,
      "learning_rate": 0.00047055555555555555,
      "loss": 4.8589,
      "step": 460
    },
    {
      "epoch": 0.14467636199231407,
      "grad_norm": 0.36183497309684753,
      "learning_rate": 0.00046685185185185187,
      "loss": 4.8089,
      "step": 480
    },
    {
      "epoch": 0.15070454374199382,
      "grad_norm": 0.6022749543190002,
      "learning_rate": 0.00046314814814814813,
      "loss": 4.7438,
      "step": 500
    },
    {
      "epoch": 0.15673272549167358,
      "grad_norm": 0.5266714096069336,
      "learning_rate": 0.00045944444444444445,
      "loss": 4.6892,
      "step": 520
    },
    {
      "epoch": 0.16276090724135334,
      "grad_norm": 0.4757547676563263,
      "learning_rate": 0.0004557407407407407,
      "loss": 4.6401,
      "step": 540
    },
    {
      "epoch": 0.1687890889910331,
      "grad_norm": 0.516314685344696,
      "learning_rate": 0.00045203703703703703,
      "loss": 4.5807,
      "step": 560
    },
    {
      "epoch": 0.17481727074071282,
      "grad_norm": 0.4857338070869446,
      "learning_rate": 0.0004483333333333333,
      "loss": 4.5335,
      "step": 580
    },
    {
      "epoch": 0.18084545249039258,
      "grad_norm": 0.47939416766166687,
      "learning_rate": 0.00044462962962962967,
      "loss": 4.4956,
      "step": 600
    },
    {
      "epoch": 0.18084545249039258,
      "eval_loss": 4.426357781719664,
      "eval_perplexity": 83.62627640080112,
      "eval_runtime": 112.2391,
      "eval_samples_per_second": 14.781,
      "eval_steps_per_second": 0.232,
      "step": 600
    },
    {
      "epoch": 0.18687363424007233,
      "grad_norm": 0.4611107110977173,
      "learning_rate": 0.00044092592592592594,
      "loss": 4.4429,
      "step": 620
    },
    {
      "epoch": 0.1929018159897521,
      "grad_norm": 0.36294886469841003,
      "learning_rate": 0.00043722222222222225,
      "loss": 4.4097,
      "step": 640
    },
    {
      "epoch": 0.19892999773943185,
      "grad_norm": 0.4987587034702301,
      "learning_rate": 0.0004335185185185185,
      "loss": 4.3755,
      "step": 660
    },
    {
      "epoch": 0.2049581794891116,
      "grad_norm": 0.32254645228385925,
      "learning_rate": 0.00042981481481481484,
      "loss": 4.3561,
      "step": 680
    },
    {
      "epoch": 0.21098636123879136,
      "grad_norm": 0.6072115302085876,
      "learning_rate": 0.0004261111111111111,
      "loss": 4.3126,
      "step": 700
    },
    {
      "epoch": 0.21701454298847112,
      "grad_norm": 0.4066341519355774,
      "learning_rate": 0.0004224074074074074,
      "loss": 4.2923,
      "step": 720
    },
    {
      "epoch": 0.22304272473815084,
      "grad_norm": 0.3725150227546692,
      "learning_rate": 0.0004187037037037037,
      "loss": 4.263,
      "step": 740
    },
    {
      "epoch": 0.2290709064878306,
      "grad_norm": 0.37484949827194214,
      "learning_rate": 0.000415,
      "loss": 4.2421,
      "step": 760
    },
    {
      "epoch": 0.23509908823751036,
      "grad_norm": 0.431652694940567,
      "learning_rate": 0.00041129629629629627,
      "loss": 4.228,
      "step": 780
    },
    {
      "epoch": 0.2411272699871901,
      "grad_norm": 0.3584285080432892,
      "learning_rate": 0.00040759259259259264,
      "loss": 4.2054,
      "step": 800
    },
    {
      "epoch": 0.24715545173686987,
      "grad_norm": 0.4462144374847412,
      "learning_rate": 0.0004038888888888889,
      "loss": 4.1834,
      "step": 820
    },
    {
      "epoch": 0.2531836334865496,
      "grad_norm": 0.29847240447998047,
      "learning_rate": 0.0004001851851851852,
      "loss": 4.1624,
      "step": 840
    },
    {
      "epoch": 0.2592118152362294,
      "grad_norm": 0.4533553123474121,
      "learning_rate": 0.0003964814814814815,
      "loss": 4.1495,
      "step": 860
    },
    {
      "epoch": 0.2652399969859091,
      "grad_norm": 0.4561823010444641,
      "learning_rate": 0.0003927777777777778,
      "loss": 4.1357,
      "step": 880
    },
    {
      "epoch": 0.2712681787355889,
      "grad_norm": 0.33924582600593567,
      "learning_rate": 0.00038907407407407407,
      "loss": 4.1124,
      "step": 900
    },
    {
      "epoch": 0.2712681787355889,
      "eval_loss": 4.058111604067862,
      "eval_perplexity": 57.86493593004541,
      "eval_runtime": 112.261,
      "eval_samples_per_second": 14.778,
      "eval_steps_per_second": 0.232,
      "step": 900
    },
    {
      "epoch": 0.2772963604852686,
      "grad_norm": 0.5096554756164551,
      "learning_rate": 0.0003853703703703704,
      "loss": 4.1011,
      "step": 920
    },
    {
      "epoch": 0.2833245422349484,
      "grad_norm": 0.3418212831020355,
      "learning_rate": 0.00038166666666666666,
      "loss": 4.09,
      "step": 940
    },
    {
      "epoch": 0.28935272398462814,
      "grad_norm": 0.269795686006546,
      "learning_rate": 0.000377962962962963,
      "loss": 4.0681,
      "step": 960
    },
    {
      "epoch": 0.29538090573430786,
      "grad_norm": 0.35889971256256104,
      "learning_rate": 0.00037425925925925924,
      "loss": 4.0686,
      "step": 980
    },
    {
      "epoch": 0.30140908748398765,
      "grad_norm": 0.411249577999115,
      "learning_rate": 0.0003705555555555556,
      "loss": 4.0558,
      "step": 1000
    },
    {
      "epoch": 0.3074372692336674,
      "grad_norm": 0.3365558981895447,
      "learning_rate": 0.0003668518518518519,
      "loss": 4.0403,
      "step": 1020
    },
    {
      "epoch": 0.31346545098334716,
      "grad_norm": 0.35357165336608887,
      "learning_rate": 0.0003631481481481482,
      "loss": 4.0246,
      "step": 1040
    },
    {
      "epoch": 0.3194936327330269,
      "grad_norm": 0.25508153438568115,
      "learning_rate": 0.00035944444444444446,
      "loss": 4.0169,
      "step": 1060
    },
    {
      "epoch": 0.3255218144827067,
      "grad_norm": 0.3912709355354309,
      "learning_rate": 0.0003557407407407408,
      "loss": 4.0066,
      "step": 1080
    },
    {
      "epoch": 0.3315499962323864,
      "grad_norm": 0.5864457488059998,
      "learning_rate": 0.00035203703703703704,
      "loss": 3.9925,
      "step": 1100
    },
    {
      "epoch": 0.3375781779820662,
      "grad_norm": 0.2764241099357605,
      "learning_rate": 0.00034833333333333336,
      "loss": 3.9831,
      "step": 1120
    },
    {
      "epoch": 0.3436063597317459,
      "grad_norm": 0.330556720495224,
      "learning_rate": 0.0003446296296296296,
      "loss": 3.9741,
      "step": 1140
    },
    {
      "epoch": 0.34963454148142564,
      "grad_norm": 0.34419673681259155,
      "learning_rate": 0.0003409259259259259,
      "loss": 3.9597,
      "step": 1160
    },
    {
      "epoch": 0.3556627232311054,
      "grad_norm": 0.29691699147224426,
      "learning_rate": 0.0003372222222222222,
      "loss": 3.9525,
      "step": 1180
    },
    {
      "epoch": 0.36169090498078516,
      "grad_norm": 0.2951587438583374,
      "learning_rate": 0.0003335185185185185,
      "loss": 3.9489,
      "step": 1200
    },
    {
      "epoch": 0.36169090498078516,
      "eval_loss": 3.8972609229729485,
      "eval_perplexity": 49.26731714501557,
      "eval_runtime": 112.1012,
      "eval_samples_per_second": 14.799,
      "eval_steps_per_second": 0.232,
      "step": 1200
    },
    {
      "epoch": 0.36771908673046494,
      "grad_norm": 0.28599607944488525,
      "learning_rate": 0.00032981481481481485,
      "loss": 3.9341,
      "step": 1220
    },
    {
      "epoch": 0.37374726848014467,
      "grad_norm": 0.44271770119667053,
      "learning_rate": 0.0003261111111111111,
      "loss": 3.9357,
      "step": 1240
    },
    {
      "epoch": 0.37977545022982445,
      "grad_norm": 0.3313683271408081,
      "learning_rate": 0.00032240740740740743,
      "loss": 3.9205,
      "step": 1260
    },
    {
      "epoch": 0.3858036319795042,
      "grad_norm": 0.2592178285121918,
      "learning_rate": 0.0003187037037037037,
      "loss": 3.9143,
      "step": 1280
    },
    {
      "epoch": 0.3918318137291839,
      "grad_norm": 0.37822020053863525,
      "learning_rate": 0.000315,
      "loss": 3.9085,
      "step": 1300
    },
    {
      "epoch": 0.3978599954788637,
      "grad_norm": 0.33519598841667175,
      "learning_rate": 0.0003112962962962963,
      "loss": 3.8993,
      "step": 1320
    },
    {
      "epoch": 0.4038881772285434,
      "grad_norm": 0.36391812562942505,
      "learning_rate": 0.0003075925925925926,
      "loss": 3.8956,
      "step": 1340
    },
    {
      "epoch": 0.4099163589782232,
      "grad_norm": 0.2704394459724426,
      "learning_rate": 0.00030388888888888886,
      "loss": 3.889,
      "step": 1360
    },
    {
      "epoch": 0.41594454072790293,
      "grad_norm": 0.38525649905204773,
      "learning_rate": 0.0003001851851851852,
      "loss": 3.8811,
      "step": 1380
    },
    {
      "epoch": 0.4219727224775827,
      "grad_norm": 0.29260268807411194,
      "learning_rate": 0.00029648148148148144,
      "loss": 3.8735,
      "step": 1400
    },
    {
      "epoch": 0.42800090422726245,
      "grad_norm": 0.2522657811641693,
      "learning_rate": 0.0002927777777777778,
      "loss": 3.8714,
      "step": 1420
    },
    {
      "epoch": 0.43402908597694223,
      "grad_norm": 0.31784236431121826,
      "learning_rate": 0.0002890740740740741,
      "loss": 3.8596,
      "step": 1440
    },
    {
      "epoch": 0.44005726772662196,
      "grad_norm": 0.3017977476119995,
      "learning_rate": 0.0002853703703703704,
      "loss": 3.8624,
      "step": 1460
    },
    {
      "epoch": 0.4460854494763017,
      "grad_norm": 0.3564951717853546,
      "learning_rate": 0.00028166666666666666,
      "loss": 3.8571,
      "step": 1480
    },
    {
      "epoch": 0.45211363122598147,
      "grad_norm": 0.2593876123428345,
      "learning_rate": 0.000277962962962963,
      "loss": 3.8495,
      "step": 1500
    },
    {
      "epoch": 0.45211363122598147,
      "eval_loss": 3.795062538778086,
      "eval_perplexity": 44.481018107066156,
      "eval_runtime": 112.4309,
      "eval_samples_per_second": 14.756,
      "eval_steps_per_second": 0.231,
      "step": 1500
    },
    {
      "epoch": 0.4581418129756612,
      "grad_norm": 0.32840925455093384,
      "learning_rate": 0.00027425925925925925,
      "loss": 3.8425,
      "step": 1520
    },
    {
      "epoch": 0.464169994725341,
      "grad_norm": 0.3139461874961853,
      "learning_rate": 0.00027055555555555557,
      "loss": 3.839,
      "step": 1540
    },
    {
      "epoch": 0.4701981764750207,
      "grad_norm": 0.25834277272224426,
      "learning_rate": 0.00026685185185185183,
      "loss": 3.8272,
      "step": 1560
    },
    {
      "epoch": 0.4762263582247005,
      "grad_norm": 0.2804831266403198,
      "learning_rate": 0.00026314814814814815,
      "loss": 3.8248,
      "step": 1580
    },
    {
      "epoch": 0.4822545399743802,
      "grad_norm": 0.3130384087562561,
      "learning_rate": 0.0002594444444444444,
      "loss": 3.8114,
      "step": 1600
    },
    {
      "epoch": 0.48828272172405995,
      "grad_norm": 0.2702663242816925,
      "learning_rate": 0.0002557407407407408,
      "loss": 3.8205,
      "step": 1620
    },
    {
      "epoch": 0.49431090347373974,
      "grad_norm": 0.35775506496429443,
      "learning_rate": 0.00025203703703703705,
      "loss": 3.8103,
      "step": 1640
    },
    {
      "epoch": 0.5003390852234195,
      "grad_norm": 0.33006781339645386,
      "learning_rate": 0.0002483333333333333,
      "loss": 3.8069,
      "step": 1660
    },
    {
      "epoch": 0.5063672669730992,
      "grad_norm": 0.28608810901641846,
      "learning_rate": 0.00024462962962962963,
      "loss": 3.8046,
      "step": 1680
    },
    {
      "epoch": 0.512395448722779,
      "grad_norm": 0.2452249377965927,
      "learning_rate": 0.00024092592592592593,
      "loss": 3.7919,
      "step": 1700
    },
    {
      "epoch": 0.5184236304724588,
      "grad_norm": 0.29953446984291077,
      "learning_rate": 0.00023722222222222222,
      "loss": 3.7933,
      "step": 1720
    },
    {
      "epoch": 0.5244518122221385,
      "grad_norm": 0.31593525409698486,
      "learning_rate": 0.0002335185185185185,
      "loss": 3.7921,
      "step": 1740
    },
    {
      "epoch": 0.5304799939718182,
      "grad_norm": 0.37334874272346497,
      "learning_rate": 0.0002298148148148148,
      "loss": 3.7888,
      "step": 1760
    },
    {
      "epoch": 0.536508175721498,
      "grad_norm": 0.3065856695175171,
      "learning_rate": 0.00022611111111111112,
      "loss": 3.7784,
      "step": 1780
    },
    {
      "epoch": 0.5425363574711778,
      "grad_norm": 0.3357571065425873,
      "learning_rate": 0.0002224074074074074,
      "loss": 3.7812,
      "step": 1800
    },
    {
      "epoch": 0.5425363574711778,
      "eval_loss": 3.728826272039063,
      "eval_perplexity": 41.6302169275088,
      "eval_runtime": 112.1495,
      "eval_samples_per_second": 14.793,
      "eval_steps_per_second": 0.232,
      "step": 1800
    },
    {
      "epoch": 0.5485645392208575,
      "grad_norm": 0.2727905511856079,
      "learning_rate": 0.0002187037037037037,
      "loss": 3.7787,
      "step": 1820
    },
    {
      "epoch": 0.5545927209705372,
      "grad_norm": 0.25383642315864563,
      "learning_rate": 0.000215,
      "loss": 3.7697,
      "step": 1840
    },
    {
      "epoch": 0.560620902720217,
      "grad_norm": 0.2943139672279358,
      "learning_rate": 0.00021129629629629629,
      "loss": 3.7624,
      "step": 1860
    },
    {
      "epoch": 0.5666490844698968,
      "grad_norm": 0.2697204053401947,
      "learning_rate": 0.0002075925925925926,
      "loss": 3.7629,
      "step": 1880
    },
    {
      "epoch": 0.5726772662195765,
      "grad_norm": 0.2720482349395752,
      "learning_rate": 0.0002038888888888889,
      "loss": 3.7545,
      "step": 1900
    },
    {
      "epoch": 0.5787054479692563,
      "grad_norm": 0.3047272264957428,
      "learning_rate": 0.0002001851851851852,
      "loss": 3.7567,
      "step": 1920
    },
    {
      "epoch": 0.584733629718936,
      "grad_norm": 0.28646621108055115,
      "learning_rate": 0.00019648148148148148,
      "loss": 3.7521,
      "step": 1940
    },
    {
      "epoch": 0.5907618114686157,
      "grad_norm": 0.27155548334121704,
      "learning_rate": 0.00019277777777777777,
      "loss": 3.7528,
      "step": 1960
    },
    {
      "epoch": 0.5967899932182955,
      "grad_norm": 0.3533537685871124,
      "learning_rate": 0.00018907407407407406,
      "loss": 3.7403,
      "step": 1980
    },
    {
      "epoch": 0.6028181749679753,
      "grad_norm": 0.29706355929374695,
      "learning_rate": 0.00018537037037037038,
      "loss": 3.7428,
      "step": 2000
    },
    {
      "epoch": 0.6088463567176551,
      "grad_norm": 0.2581825852394104,
      "learning_rate": 0.00018166666666666667,
      "loss": 3.7382,
      "step": 2020
    },
    {
      "epoch": 0.6148745384673348,
      "grad_norm": 0.278071790933609,
      "learning_rate": 0.00017796296296296296,
      "loss": 3.7328,
      "step": 2040
    },
    {
      "epoch": 0.6209027202170145,
      "grad_norm": 0.2304176539182663,
      "learning_rate": 0.00017425925925925926,
      "loss": 3.7338,
      "step": 2060
    },
    {
      "epoch": 0.6269309019666943,
      "grad_norm": 0.2794628441333771,
      "learning_rate": 0.00017055555555555555,
      "loss": 3.7316,
      "step": 2080
    },
    {
      "epoch": 0.632959083716374,
      "grad_norm": 0.2320399135351181,
      "learning_rate": 0.00016685185185185187,
      "loss": 3.7267,
      "step": 2100
    },
    {
      "epoch": 0.632959083716374,
      "eval_loss": 3.6778735519165284,
      "eval_perplexity": 39.5621776465433,
      "eval_runtime": 112.1248,
      "eval_samples_per_second": 14.796,
      "eval_steps_per_second": 0.232,
      "step": 2100
    },
    {
      "epoch": 0.6389872654660538,
      "grad_norm": 0.26810985803604126,
      "learning_rate": 0.00016314814814814816,
      "loss": 3.7259,
      "step": 2120
    },
    {
      "epoch": 0.6450154472157336,
      "grad_norm": 0.28521475195884705,
      "learning_rate": 0.00015944444444444445,
      "loss": 3.7242,
      "step": 2140
    },
    {
      "epoch": 0.6510436289654133,
      "grad_norm": 0.24218100309371948,
      "learning_rate": 0.00015574074074074074,
      "loss": 3.7209,
      "step": 2160
    },
    {
      "epoch": 0.657071810715093,
      "grad_norm": 0.2587539553642273,
      "learning_rate": 0.00015203703703703703,
      "loss": 3.7198,
      "step": 2180
    },
    {
      "epoch": 0.6630999924647728,
      "grad_norm": 0.29283982515335083,
      "learning_rate": 0.00014833333333333335,
      "loss": 3.7162,
      "step": 2200
    },
    {
      "epoch": 0.6691281742144526,
      "grad_norm": 0.23661203682422638,
      "learning_rate": 0.00014462962962962964,
      "loss": 3.714,
      "step": 2220
    },
    {
      "epoch": 0.6751563559641324,
      "grad_norm": 0.2184392660856247,
      "learning_rate": 0.00014092592592592594,
      "loss": 3.7121,
      "step": 2240
    },
    {
      "epoch": 0.681184537713812,
      "grad_norm": 0.24910466372966766,
      "learning_rate": 0.00013722222222222223,
      "loss": 3.6996,
      "step": 2260
    },
    {
      "epoch": 0.6872127194634918,
      "grad_norm": 0.2523048222064972,
      "learning_rate": 0.00013351851851851852,
      "loss": 3.7068,
      "step": 2280
    },
    {
      "epoch": 0.6932409012131716,
      "grad_norm": 0.25991636514663696,
      "learning_rate": 0.00012981481481481484,
      "loss": 3.6997,
      "step": 2300
    },
    {
      "epoch": 0.6992690829628513,
      "grad_norm": 0.2645404636859894,
      "learning_rate": 0.00012611111111111113,
      "loss": 3.6931,
      "step": 2320
    },
    {
      "epoch": 0.7052972647125311,
      "grad_norm": 0.24797891080379486,
      "learning_rate": 0.00012240740740740742,
      "loss": 3.6951,
      "step": 2340
    },
    {
      "epoch": 0.7113254464622109,
      "grad_norm": 0.24686554074287415,
      "learning_rate": 0.0001187037037037037,
      "loss": 3.6944,
      "step": 2360
    },
    {
      "epoch": 0.7173536282118906,
      "grad_norm": 0.25571176409721375,
      "learning_rate": 0.000115,
      "loss": 3.6853,
      "step": 2380
    },
    {
      "epoch": 0.7233818099615703,
      "grad_norm": 0.23296213150024414,
      "learning_rate": 0.0001112962962962963,
      "loss": 3.6937,
      "step": 2400
    },
    {
      "epoch": 0.7233818099615703,
      "eval_loss": 3.6413854997651955,
      "eval_perplexity": 38.14464953385645,
      "eval_runtime": 112.0529,
      "eval_samples_per_second": 14.806,
      "eval_steps_per_second": 0.232,
      "step": 2400
    },
    {
      "epoch": 0.7294099917112501,
      "grad_norm": 0.23208530247211456,
      "learning_rate": 0.00010759259259259259,
      "loss": 3.6898,
      "step": 2420
    },
    {
      "epoch": 0.7354381734609299,
      "grad_norm": 0.2420375943183899,
      "learning_rate": 0.00010388888888888889,
      "loss": 3.6874,
      "step": 2440
    },
    {
      "epoch": 0.7414663552106096,
      "grad_norm": 0.2512781620025635,
      "learning_rate": 0.00010018518518518518,
      "loss": 3.6881,
      "step": 2460
    },
    {
      "epoch": 0.7474945369602893,
      "grad_norm": 0.20650093257427216,
      "learning_rate": 9.648148148148149e-05,
      "loss": 3.6808,
      "step": 2480
    },
    {
      "epoch": 0.7535227187099691,
      "grad_norm": 0.2324400097131729,
      "learning_rate": 9.277777777777778e-05,
      "loss": 3.6816,
      "step": 2500
    },
    {
      "epoch": 0.7595509004596489,
      "grad_norm": 0.21381932497024536,
      "learning_rate": 8.907407407407407e-05,
      "loss": 3.6802,
      "step": 2520
    },
    {
      "epoch": 0.7655790822093286,
      "grad_norm": 0.22228114306926727,
      "learning_rate": 8.537037037037038e-05,
      "loss": 3.68,
      "step": 2540
    },
    {
      "epoch": 0.7716072639590084,
      "grad_norm": 0.21108661592006683,
      "learning_rate": 8.166666666666667e-05,
      "loss": 3.6752,
      "step": 2560
    },
    {
      "epoch": 0.7776354457086881,
      "grad_norm": 0.20819643139839172,
      "learning_rate": 7.796296296296296e-05,
      "loss": 3.6737,
      "step": 2580
    },
    {
      "epoch": 0.7836636274583678,
      "grad_norm": 0.21848240494728088,
      "learning_rate": 7.425925925925927e-05,
      "loss": 3.6672,
      "step": 2600
    },
    {
      "epoch": 0.7896918092080476,
      "grad_norm": 0.2202238291501999,
      "learning_rate": 7.055555555555556e-05,
      "loss": 3.6735,
      "step": 2620
    },
    {
      "epoch": 0.7957199909577274,
      "grad_norm": 0.19879350066184998,
      "learning_rate": 6.685185185185186e-05,
      "loss": 3.6707,
      "step": 2640
    },
    {
      "epoch": 0.8017481727074072,
      "grad_norm": 0.204148530960083,
      "learning_rate": 6.314814814814815e-05,
      "loss": 3.6701,
      "step": 2660
    },
    {
      "epoch": 0.8077763544570868,
      "grad_norm": 0.2012234628200531,
      "learning_rate": 5.9444444444444445e-05,
      "loss": 3.6627,
      "step": 2680
    },
    {
      "epoch": 0.8138045362067666,
      "grad_norm": 0.1901106983423233,
      "learning_rate": 5.5740740740740744e-05,
      "loss": 3.6656,
      "step": 2700
    },
    {
      "epoch": 0.8138045362067666,
      "eval_loss": 3.6161044157228823,
      "eval_perplexity": 37.192399121937946,
      "eval_runtime": 112.241,
      "eval_samples_per_second": 14.781,
      "eval_steps_per_second": 0.232,
      "step": 2700
    },
    {
      "epoch": 0.8198327179564464,
      "grad_norm": 0.19361631572246552,
      "learning_rate": 5.2037037037037035e-05,
      "loss": 3.6636,
      "step": 2720
    },
    {
      "epoch": 0.8258608997061262,
      "grad_norm": 0.1905573159456253,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 3.6658,
      "step": 2740
    },
    {
      "epoch": 0.8318890814558059,
      "grad_norm": 0.18366636335849762,
      "learning_rate": 4.462962962962963e-05,
      "loss": 3.6613,
      "step": 2760
    },
    {
      "epoch": 0.8379172632054857,
      "grad_norm": 0.18855422735214233,
      "learning_rate": 4.092592592592593e-05,
      "loss": 3.6594,
      "step": 2780
    },
    {
      "epoch": 0.8439454449551654,
      "grad_norm": 0.18520458042621613,
      "learning_rate": 3.722222222222222e-05,
      "loss": 3.662,
      "step": 2800
    },
    {
      "epoch": 0.8499736267048451,
      "grad_norm": 0.17435666918754578,
      "learning_rate": 3.351851851851852e-05,
      "loss": 3.6639,
      "step": 2820
    },
    {
      "epoch": 0.8560018084545249,
      "grad_norm": 0.1787436455488205,
      "learning_rate": 2.9814814814814815e-05,
      "loss": 3.6559,
      "step": 2840
    },
    {
      "epoch": 0.8620299902042047,
      "grad_norm": 0.1714988499879837,
      "learning_rate": 2.6111111111111114e-05,
      "loss": 3.6534,
      "step": 2860
    },
    {
      "epoch": 0.8680581719538845,
      "grad_norm": 0.1794491559267044,
      "learning_rate": 2.240740740740741e-05,
      "loss": 3.6575,
      "step": 2880
    },
    {
      "epoch": 0.8740863537035641,
      "grad_norm": 0.17227113246917725,
      "learning_rate": 1.8703703703703707e-05,
      "loss": 3.6525,
      "step": 2900
    },
    {
      "epoch": 0.8801145354532439,
      "grad_norm": 0.16660287976264954,
      "learning_rate": 1.5e-05,
      "loss": 3.6569,
      "step": 2920
    },
    {
      "epoch": 0.8861427172029237,
      "grad_norm": 0.16066555678844452,
      "learning_rate": 1.1296296296296295e-05,
      "loss": 3.6477,
      "step": 2940
    },
    {
      "epoch": 0.8921708989526034,
      "grad_norm": 0.160615935921669,
      "learning_rate": 7.592592592592593e-06,
      "loss": 3.644,
      "step": 2960
    },
    {
      "epoch": 0.8981990807022832,
      "grad_norm": 0.15302099287509918,
      "learning_rate": 3.888888888888889e-06,
      "loss": 3.6517,
      "step": 2980
    },
    {
      "epoch": 0.9042272624519629,
      "grad_norm": 0.15698660910129547,
      "learning_rate": 1.8518518518518518e-07,
      "loss": 3.6558,
      "step": 3000
    },
    {
      "epoch": 0.9042272624519629,
      "eval_loss": 3.602764652887152,
      "eval_perplexity": 36.6995558529248,
      "eval_runtime": 112.1149,
      "eval_samples_per_second": 14.797,
      "eval_steps_per_second": 0.232,
      "step": 3000
    }
  ],
  "logging_steps": 20,
  "max_steps": 3000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 300,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.537800155430912e+18,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}