{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4853125,
  "eval_steps": 25,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 11.566158518685013,
      "learning_rate": 5.263157894736843e-07,
      "loss": 1.5926,
      "step": 1
    },
    {
      "epoch": 0.01,
      "eval_loss": 2.57490611076355,
      "eval_runtime": 101.999,
      "eval_samples_per_second": 13.373,
      "eval_steps_per_second": 3.343,
      "step": 1
    },
    {
      "epoch": 0.02,
      "grad_norm": 9.445195266531163,
      "learning_rate": 1.0526315789473685e-06,
      "loss": 1.6027,
      "step": 2
    },
    {
      "epoch": 0.03,
      "grad_norm": 12.468540256646872,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 1.6071,
      "step": 3
    },
    {
      "epoch": 0.04,
      "grad_norm": 14.056638393452099,
      "learning_rate": 2.105263157894737e-06,
      "loss": 1.5925,
      "step": 4
    },
    {
      "epoch": 0.05,
      "grad_norm": 17.907271199793218,
      "learning_rate": 2.631578947368421e-06,
      "loss": 1.5882,
      "step": 5
    },
    {
      "epoch": 0.06,
      "grad_norm": 10.874515175224529,
      "learning_rate": 3.157894736842105e-06,
      "loss": 1.5566,
      "step": 6
    },
    {
      "epoch": 0.07,
      "grad_norm": 7.2991712424563575,
      "learning_rate": 3.6842105263157896e-06,
      "loss": 1.5202,
      "step": 7
    },
    {
      "epoch": 0.08,
      "grad_norm": 8.308718463629434,
      "learning_rate": 4.210526315789474e-06,
      "loss": 1.5206,
      "step": 8
    },
    {
      "epoch": 0.09,
      "grad_norm": 8.241786544815596,
      "learning_rate": 4.736842105263158e-06,
      "loss": 1.5149,
      "step": 9
    },
    {
      "epoch": 0.1,
      "grad_norm": 4.516814669266343,
      "learning_rate": 5.263157894736842e-06,
      "loss": 1.4843,
      "step": 10
    },
    {
      "epoch": 0.11,
      "grad_norm": 1.8837153037302374,
      "learning_rate": 5.789473684210527e-06,
      "loss": 1.4957,
      "step": 11
    },
    {
      "epoch": 0.12,
      "grad_norm": 1.2975694701034297,
      "learning_rate": 6.31578947368421e-06,
      "loss": 1.4315,
      "step": 12
    },
    {
      "epoch": 0.13,
      "grad_norm": 2.645783460157163,
      "learning_rate": 6.842105263157896e-06,
      "loss": 1.437,
      "step": 13
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.3252432036353,
      "learning_rate": 7.368421052631579e-06,
      "loss": 1.4225,
      "step": 14
    },
    {
      "epoch": 0.15,
      "grad_norm": 1.7474221172201105,
      "learning_rate": 7.894736842105265e-06,
      "loss": 1.4368,
      "step": 15
    },
    {
      "epoch": 0.16,
      "grad_norm": 1.5068557456339307,
      "learning_rate": 8.421052631578948e-06,
      "loss": 1.3822,
      "step": 16
    },
    {
      "epoch": 0.17,
      "grad_norm": 1.4115398495070288,
      "learning_rate": 8.947368421052632e-06,
      "loss": 1.3843,
      "step": 17
    },
    {
      "epoch": 0.18,
      "grad_norm": 1.3912456051531092,
      "learning_rate": 9.473684210526315e-06,
      "loss": 1.3675,
      "step": 18
    },
    {
      "epoch": 0.19,
      "grad_norm": 1.186758773014767,
      "learning_rate": 1e-05,
      "loss": 1.4011,
      "step": 19
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.1393283985637506,
      "learning_rate": 9.999246866958693e-06,
      "loss": 1.4036,
      "step": 20
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.8995358814489449,
      "learning_rate": 9.99698769471852e-06,
      "loss": 1.3586,
      "step": 21
    },
    {
      "epoch": 0.22,
      "grad_norm": 1.0569718242603094,
      "learning_rate": 9.993223163862385e-06,
      "loss": 1.3567,
      "step": 22
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.9025470054457645,
      "learning_rate": 9.98795440846732e-06,
      "loss": 1.3699,
      "step": 23
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.7991626020662913,
      "learning_rate": 9.981183015762831e-06,
      "loss": 1.334,
      "step": 24
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.8467742545448407,
      "learning_rate": 9.972911025652754e-06,
      "loss": 1.3248,
      "step": 25
    },
    {
      "epoch": 0.25,
      "eval_loss": 2.339007616043091,
      "eval_runtime": 101.6007,
      "eval_samples_per_second": 13.425,
      "eval_steps_per_second": 3.356,
      "step": 25
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.799819401966694,
      "learning_rate": 9.963140930100713e-06,
      "loss": 1.3379,
      "step": 26
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.761093653803086,
      "learning_rate": 9.951875672379424e-06,
      "loss": 1.3112,
      "step": 27
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.7954388946463846,
      "learning_rate": 9.939118646184007e-06,
      "loss": 1.3232,
      "step": 28
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.740339753543114,
      "learning_rate": 9.924873694609636e-06,
      "loss": 1.3307,
      "step": 29
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.7044993492523487,
      "learning_rate": 9.909145108993794e-06,
      "loss": 1.3399,
      "step": 30
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.7640322477664385,
      "learning_rate": 9.891937627623486e-06,
      "loss": 1.3038,
      "step": 31
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.7262497385764958,
      "learning_rate": 9.873256434307828e-06,
      "loss": 1.3087,
      "step": 32
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.7533248019124158,
      "learning_rate": 9.853107156816393e-06,
      "loss": 1.3112,
      "step": 33
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.8422733691872109,
      "learning_rate": 9.831495865183832e-06,
      "loss": 1.3114,
      "step": 34
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.8012394357532521,
      "learning_rate": 9.808429069881267e-06,
      "loss": 1.2835,
      "step": 35
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.7000870401208915,
      "learning_rate": 9.783913719854977e-06,
      "loss": 1.276,
      "step": 36
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.7621452978518404,
      "learning_rate": 9.757957200433011e-06,
      "loss": 1.2975,
      "step": 37
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.7161333100837386,
      "learning_rate": 9.730567331100333e-06,
      "loss": 1.3196,
      "step": 38
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.702335306780222,
      "learning_rate": 9.701752363143183e-06,
      "loss": 1.2863,
      "step": 39
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.7478344073066684,
      "learning_rate": 9.67152097716334e-06,
      "loss": 1.2664,
      "step": 40
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.7850826821316415,
      "learning_rate": 9.639882280463071e-06,
      "loss": 1.2899,
      "step": 41
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.6636731593357432,
      "learning_rate": 9.606845804301523e-06,
      "loss": 1.2686,
      "step": 42
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.6928945928752355,
      "learning_rate": 9.572421501023403e-06,
      "loss": 1.2762,
      "step": 43
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.7057866334082888,
      "learning_rate": 9.536619741060799e-06,
      "loss": 1.2834,
      "step": 44
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.7045044736070911,
      "learning_rate": 9.499451309809058e-06,
      "loss": 1.241,
      "step": 45
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.6976063766037892,
      "learning_rate": 9.460927404377647e-06,
      "loss": 1.2379,
      "step": 46
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.7353201696220303,
      "learning_rate": 9.421059630216992e-06,
      "loss": 1.2542,
      "step": 47
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.6999623896459112,
      "learning_rate": 9.37985999762229e-06,
      "loss": 1.256,
      "step": 48
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.6909158479517368,
      "learning_rate": 9.337340918115385e-06,
      "loss": 1.2537,
      "step": 49
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.691103492023218,
      "learning_rate": 9.29351520070574e-06,
      "loss": 1.2642,
      "step": 50
    },
    {
      "epoch": 0.5,
      "eval_loss": 2.2907779216766357,
      "eval_runtime": 101.7172,
      "eval_samples_per_second": 13.41,
      "eval_steps_per_second": 3.352,
      "step": 50
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.6826943593281288,
      "learning_rate": 9.24839604803169e-06,
      "loss": 1.2748,
      "step": 51
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.7706665613331782,
      "learning_rate": 9.201997052383107e-06,
      "loss": 1.2636,
      "step": 52
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.6492853598477402,
      "learning_rate": 9.154332191606671e-06,
      "loss": 1.2302,
      "step": 53
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.7072381497324032,
      "learning_rate": 9.105415824895008e-06,
      "loss": 1.2746,
      "step": 54
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.6748157276982609,
      "learning_rate": 9.055262688460931e-06,
      "loss": 1.2554,
      "step": 55
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.7105138755460529,
      "learning_rate": 9.003887891098108e-06,
      "loss": 1.2106,
      "step": 56
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.7376331313243213,
      "learning_rate": 8.951306909629492e-06,
      "loss": 1.2201,
      "step": 57
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.6797243894530027,
      "learning_rate": 8.89753558424488e-06,
      "loss": 1.2671,
      "step": 58
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.6880139084560465,
      "learning_rate": 8.842590113729001e-06,
      "loss": 1.2457,
      "step": 59
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.6302506320753319,
      "learning_rate": 8.786487050581583e-06,
      "loss": 1.2309,
      "step": 60
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.6711490529071925,
      "learning_rate": 8.729243296030851e-06,
      "loss": 1.2071,
      "step": 61
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.6454515943516826,
      "learning_rate": 8.670876094941991e-06,
      "loss": 1.2246,
      "step": 62
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.6200154305292742,
      "learning_rate": 8.611403030622074e-06,
      "loss": 1.2319,
      "step": 63
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.676378007678565,
      "learning_rate": 8.55084201952302e-06,
      "loss": 1.2403,
      "step": 64
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.6168431268474576,
      "learning_rate": 8.489211305844216e-06,
      "loss": 1.2246,
      "step": 65
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.6768307579248142,
      "learning_rate": 8.4265294560364e-06,
      "loss": 1.233,
      "step": 66
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.6388639587914229,
      "learning_rate": 8.362815353208441e-06,
      "loss": 1.2555,
      "step": 67
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.6497675316556933,
      "learning_rate": 8.298088191438753e-06,
      "loss": 1.2652,
      "step": 68
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.7546681097845381,
      "learning_rate": 8.23236746999302e-06,
      "loss": 1.1981,
      "step": 69
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.5956365157182328,
      "learning_rate": 8.165672987449962e-06,
      "loss": 1.2387,
      "step": 70
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.7611636454374872,
      "learning_rate": 8.098024835736977e-06,
      "loss": 1.2255,
      "step": 71
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.5900696948196736,
      "learning_rate": 8.029443394077356e-06,
      "loss": 1.2212,
      "step": 72
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.7690074097436085,
      "learning_rate": 7.959949322850994e-06,
      "loss": 1.1815,
      "step": 73
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.628966832946564,
      "learning_rate": 7.889563557370378e-06,
      "loss": 1.2112,
      "step": 74
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.6731895480734998,
      "learning_rate": 7.818307301573757e-06,
      "loss": 1.2088,
      "step": 75
    },
    {
      "epoch": 0.75,
      "eval_loss": 2.247450590133667,
      "eval_runtime": 101.7736,
      "eval_samples_per_second": 13.402,
      "eval_steps_per_second": 3.351,
      "step": 75
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.6228635592692154,
      "learning_rate": 7.746202021637385e-06,
      "loss": 1.2348,
      "step": 76
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.6568692402221102,
      "learning_rate": 7.67326943950877e-06,
      "loss": 1.2098,
      "step": 77
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.5802460088604465,
      "learning_rate": 7.599531526362873e-06,
      "loss": 1.2043,
      "step": 78
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.5996845997396012,
      "learning_rate": 7.525010495983202e-06,
      "loss": 1.2038,
      "step": 79
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.5933543648635481,
      "learning_rate": 7.449728798069864e-06,
      "loss": 1.2144,
      "step": 80
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.5914083388025546,
      "learning_rate": 7.373709111476498e-06,
      "loss": 1.2167,
      "step": 81
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.6156067665398204,
      "learning_rate": 7.296974337378209e-06,
      "loss": 1.2235,
      "step": 82
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.5449806588736864,
      "learning_rate": 7.219547592372512e-06,
      "loss": 1.1799,
      "step": 83
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.5856716887203739,
      "learning_rate": 7.141452201515386e-06,
      "loss": 1.2157,
      "step": 84
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.550531722964763,
      "learning_rate": 7.062711691294525e-06,
      "loss": 1.203,
      "step": 85
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.6058964662304129,
      "learning_rate": 6.983349782541901e-06,
      "loss": 1.2156,
      "step": 86
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.5538171332714614,
      "learning_rate": 6.903390383287795e-06,
      "loss": 1.2028,
      "step": 87
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.63135180430502,
      "learning_rate": 6.822857581558423e-06,
      "loss": 1.1914,
      "step": 88
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.5284303806093219,
      "learning_rate": 6.741775638119345e-06,
      "loss": 1.1821,
      "step": 89
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.5655951173352003,
      "learning_rate": 6.66016897916682e-06,
      "loss": 1.1763,
      "step": 90
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.5694584615689945,
      "learning_rate": 6.57806218896935e-06,
      "loss": 1.1994,
      "step": 91
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.5875806897556083,
      "learning_rate": 6.495480002461577e-06,
      "loss": 1.2089,
      "step": 92
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.5772988824245247,
      "learning_rate": 6.412447297792818e-06,
      "loss": 1.2169,
      "step": 93
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.5520580922569894,
      "learning_rate": 6.328989088832431e-06,
      "loss": 1.2293,
      "step": 94
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.6323511804403603,
      "learning_rate": 6.245130517634307e-06,
      "loss": 1.183,
      "step": 95
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.5674890093965291,
      "learning_rate": 6.160896846862754e-06,
      "loss": 1.1936,
      "step": 96
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.5631472701881443,
      "learning_rate": 6.076313452182033e-06,
      "loss": 1.2032,
      "step": 97
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.5171932549909163,
      "learning_rate": 5.991405814611855e-06,
      "loss": 1.1751,
      "step": 98
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.6057671011918265,
      "learning_rate": 5.9061995128511455e-06,
      "loss": 1.2261,
      "step": 99
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.507077055743443,
      "learning_rate": 5.820720215572375e-06,
      "loss": 1.2043,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.230144500732422,
      "eval_runtime": 101.7844,
      "eval_samples_per_second": 13.401,
      "eval_steps_per_second": 3.35,
      "step": 100
    },
    {
      "epoch": 1.01,
      "grad_norm": 0.5812309118368538,
      "learning_rate": 5.734993673688801e-06,
      "loss": 1.2084,
      "step": 101
    },
    {
      "epoch": 1.0053125,
      "grad_norm": 0.6456188239826643,
      "learning_rate": 5.6490457125969035e-06,
      "loss": 1.1291,
      "step": 102
    },
    {
      "epoch": 1.0153125,
      "grad_norm": 0.6773326608124808,
      "learning_rate": 5.562902224396416e-06,
      "loss": 1.1395,
      "step": 103
    },
    {
      "epoch": 1.0253125,
      "grad_norm": 0.7477360145152014,
      "learning_rate": 5.476589160090238e-06,
      "loss": 1.1521,
      "step": 104
    },
    {
      "epoch": 1.0353125,
      "grad_norm": 0.6791382128036062,
      "learning_rate": 5.390132521766626e-06,
      "loss": 1.1042,
      "step": 105
    },
    {
      "epoch": 1.0453125,
      "grad_norm": 0.6766906884663769,
      "learning_rate": 5.30355835476596e-06,
      "loss": 1.1253,
      "step": 106
    },
    {
      "epoch": 1.0553125,
      "grad_norm": 0.6552524158086009,
      "learning_rate": 5.216892739834519e-06,
      "loss": 1.1134,
      "step": 107
    },
    {
      "epoch": 1.0653125,
      "grad_norm": 0.6715767756589318,
      "learning_rate": 5.13016178526756e-06,
      "loss": 1.1227,
      "step": 108
    },
    {
      "epoch": 1.0753125,
      "grad_norm": 0.66379936996141,
      "learning_rate": 5.043391619044122e-06,
      "loss": 1.1206,
      "step": 109
    },
    {
      "epoch": 1.0853125,
      "grad_norm": 0.5851609238678033,
      "learning_rate": 4.956608380955877e-06,
      "loss": 1.131,
      "step": 110
    },
    {
      "epoch": 1.0953125,
      "grad_norm": 0.6316936042931867,
      "learning_rate": 4.869838214732441e-06,
      "loss": 1.0741,
      "step": 111
    },
    {
      "epoch": 1.1053125,
      "grad_norm": 0.5876684242005867,
      "learning_rate": 4.783107260165483e-06,
      "loss": 1.114,
      "step": 112
    },
    {
      "epoch": 1.1153125,
      "grad_norm": 0.5392050170169478,
      "learning_rate": 4.696441645234042e-06,
      "loss": 1.1036,
      "step": 113
    },
    {
      "epoch": 1.1253125,
      "grad_norm": 0.646452087623636,
      "learning_rate": 4.609867478233377e-06,
      "loss": 1.1174,
      "step": 114
    },
    {
      "epoch": 1.1353125,
      "grad_norm": 0.5430718729275932,
      "learning_rate": 4.523410839909764e-06,
      "loss": 1.1192,
      "step": 115
    },
    {
      "epoch": 1.1453125,
      "grad_norm": 0.597141606799086,
      "learning_rate": 4.437097775603587e-06,
      "loss": 1.1368,
      "step": 116
    },
    {
      "epoch": 1.1553125,
      "grad_norm": 0.5891457988432474,
      "learning_rate": 4.350954287403099e-06,
      "loss": 1.1011,
      "step": 117
    },
    {
      "epoch": 1.1653125,
      "grad_norm": 0.578994264391102,
      "learning_rate": 4.265006326311199e-06,
      "loss": 1.1163,
      "step": 118
    },
    {
      "epoch": 1.1753125,
      "grad_norm": 0.6131297633318765,
      "learning_rate": 4.179279784427625e-06,
      "loss": 1.1176,
      "step": 119
    },
    {
      "epoch": 1.1853125,
      "grad_norm": 0.5424229334496232,
      "learning_rate": 4.093800487148857e-06,
      "loss": 1.1296,
      "step": 120
    },
    {
      "epoch": 1.1953125,
      "grad_norm": 0.5700231870921876,
      "learning_rate": 4.008594185388146e-06,
      "loss": 1.1115,
      "step": 121
    },
    {
      "epoch": 1.2053125,
      "grad_norm": 0.5721172147422489,
      "learning_rate": 3.9236865478179685e-06,
      "loss": 1.1336,
      "step": 122
    },
    {
      "epoch": 1.2153125,
      "grad_norm": 0.5208504762272286,
      "learning_rate": 3.839103153137247e-06,
      "loss": 1.1315,
      "step": 123
    },
    {
      "epoch": 1.2253125,
      "grad_norm": 0.583935859982723,
      "learning_rate": 3.7548694823656945e-06,
      "loss": 1.124,
      "step": 124
    },
    {
      "epoch": 1.2353125,
      "grad_norm": 0.5128004092566397,
      "learning_rate": 3.671010911167572e-06,
      "loss": 1.1105,
      "step": 125
    },
    {
      "epoch": 1.2353125,
      "eval_loss": 2.2433762550354004,
      "eval_runtime": 101.7452,
      "eval_samples_per_second": 13.406,
      "eval_steps_per_second": 3.352,
      "step": 125
    },
    {
      "epoch": 1.2453125,
      "grad_norm": 0.5439442747202429,
      "learning_rate": 3.5875527022071808e-06,
      "loss": 1.1211,
      "step": 126
    },
    {
      "epoch": 1.2553125,
      "grad_norm": 0.5418915161743365,
      "learning_rate": 3.5045199975384225e-06,
      "loss": 1.0848,
      "step": 127
    },
    {
      "epoch": 1.2653125,
      "grad_norm": 0.5119029994456726,
      "learning_rate": 3.4219378110306523e-06,
      "loss": 1.1004,
      "step": 128
    },
    {
      "epoch": 1.2753125,
      "grad_norm": 0.532653861592276,
      "learning_rate": 3.3398310208331806e-06,
      "loss": 1.1008,
      "step": 129
    },
    {
      "epoch": 1.2853125,
      "grad_norm": 0.5440576197306609,
      "learning_rate": 3.2582243618806574e-06,
      "loss": 1.1116,
      "step": 130
    },
    {
      "epoch": 1.2953125,
      "grad_norm": 0.5357844018820976,
      "learning_rate": 3.177142418441578e-06,
      "loss": 1.0874,
      "step": 131
    },
    {
      "epoch": 1.3053124999999999,
      "grad_norm": 0.5099622635134573,
      "learning_rate": 3.096609616712207e-06,
      "loss": 1.125,
      "step": 132
    },
    {
      "epoch": 1.3153125,
      "grad_norm": 0.5011401783480428,
      "learning_rate": 3.0166502174581012e-06,
      "loss": 1.096,
      "step": 133
    },
    {
      "epoch": 1.3253125,
      "grad_norm": 0.5199335168835749,
      "learning_rate": 2.937288308705475e-06,
      "loss": 1.1196,
      "step": 134
    },
    {
      "epoch": 1.3353125000000001,
      "grad_norm": 0.5085930844982837,
      "learning_rate": 2.858547798484613e-06,
      "loss": 1.0749,
      "step": 135
    },
    {
      "epoch": 1.3453125,
      "grad_norm": 0.5078342161970899,
      "learning_rate": 2.7804524076274898e-06,
      "loss": 1.122,
      "step": 136
    },
    {
      "epoch": 1.3553125,
      "grad_norm": 0.5289426511308486,
      "learning_rate": 2.7030256626217932e-06,
      "loss": 1.1074,
      "step": 137
    },
    {
      "epoch": 1.3653125,
      "grad_norm": 0.5057579305180265,
      "learning_rate": 2.6262908885235046e-06,
      "loss": 1.1256,
      "step": 138
    },
    {
      "epoch": 1.3753125,
      "grad_norm": 0.5187731221107936,
      "learning_rate": 2.550271201930136e-06,
      "loss": 1.1215,
      "step": 139
    },
    {
      "epoch": 1.3853125,
      "grad_norm": 0.4931893467192466,
      "learning_rate": 2.474989504016798e-06,
      "loss": 1.1013,
      "step": 140
    },
    {
      "epoch": 1.3953125,
      "grad_norm": 0.49282493936461075,
      "learning_rate": 2.4004684736371276e-06,
      "loss": 1.1081,
      "step": 141
    },
    {
      "epoch": 1.4053125,
      "grad_norm": 0.5010941951143336,
      "learning_rate": 2.32673056049123e-06,
      "loss": 1.1025,
      "step": 142
    },
    {
      "epoch": 1.4153125,
      "grad_norm": 0.5158880948095923,
      "learning_rate": 2.253797978362617e-06,
      "loss": 1.1197,
      "step": 143
    },
    {
      "epoch": 1.4253125,
      "grad_norm": 0.49950406128170766,
      "learning_rate": 2.1816926984262454e-06,
      "loss": 1.0871,
      "step": 144
    },
    {
      "epoch": 1.4353125,
      "grad_norm": 0.5273902595696414,
      "learning_rate": 2.1104364426296237e-06,
      "loss": 1.0956,
      "step": 145
    },
    {
      "epoch": 1.4453125,
      "grad_norm": 0.508477801401554,
      "learning_rate": 2.040050677149008e-06,
      "loss": 1.0671,
      "step": 146
    },
    {
      "epoch": 1.4553125,
      "grad_norm": 0.49311797703916466,
      "learning_rate": 1.970556605922645e-06,
      "loss": 1.0934,
      "step": 147
    },
    {
      "epoch": 1.4653125,
      "grad_norm": 0.5162094705766576,
      "learning_rate": 1.9019751642630252e-06,
      "loss": 1.0974,
      "step": 148
    },
    {
      "epoch": 1.4753125,
      "grad_norm": 0.5127608503641771,
      "learning_rate": 1.8343270125500379e-06,
      "loss": 1.1011,
      "step": 149
    },
    {
      "epoch": 1.4853125,
      "grad_norm": 0.5052689940443171,
      "learning_rate": 1.7676325300069824e-06,
      "loss": 1.1253,
      "step": 150
    },
    {
      "epoch": 1.4853125,
      "eval_loss": 2.238349437713623,
      "eval_runtime": 101.7261,
      "eval_samples_per_second": 13.409,
      "eval_steps_per_second": 3.352,
      "step": 150
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.6037676702787174e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}