{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3975535168195719,
"eval_steps": 500,
"global_step": 1600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00436871996505024,
"grad_norm": 9.839941024780273,
"learning_rate": 8e-05,
"loss": 2.5246,
"step": 5
},
{
"epoch": 0.00873743993010048,
"grad_norm": 13.773455619812012,
"learning_rate": 0.00018,
"loss": 1.1343,
"step": 10
},
{
"epoch": 0.01310615989515072,
"grad_norm": 5.6580424308776855,
"learning_rate": 0.0001999997582552296,
"loss": 0.7712,
"step": 15
},
{
"epoch": 0.01747487986020096,
"grad_norm": 5.294467926025391,
"learning_rate": 0.0001999987761691029,
"loss": 0.73,
"step": 20
},
{
"epoch": 0.021843599825251202,
"grad_norm": 2.8633503913879395,
"learning_rate": 0.00019999703863998527,
"loss": 0.7289,
"step": 25
},
{
"epoch": 0.02621231979030144,
"grad_norm": 3.2836177349090576,
"learning_rate": 0.00019999454568100293,
"loss": 0.4686,
"step": 30
},
{
"epoch": 0.03058103975535168,
"grad_norm": 4.878258228302002,
"learning_rate": 0.00019999129731098898,
"loss": 0.6629,
"step": 35
},
{
"epoch": 0.03494975972040192,
"grad_norm": 2.899914026260376,
"learning_rate": 0.00019998729355448326,
"loss": 0.6038,
"step": 40
},
{
"epoch": 0.039318479685452164,
"grad_norm": 3.289844274520874,
"learning_rate": 0.00019998253444173235,
"loss": 0.4573,
"step": 45
},
{
"epoch": 0.043687199650502405,
"grad_norm": 2.957254648208618,
"learning_rate": 0.00019997702000868896,
"loss": 0.594,
"step": 50
},
{
"epoch": 0.048055919615552646,
"grad_norm": 3.171276807785034,
"learning_rate": 0.00019997075029701207,
"loss": 0.5719,
"step": 55
},
{
"epoch": 0.05242463958060288,
"grad_norm": 2.55605149269104,
"learning_rate": 0.0001999637253540663,
"loss": 0.5971,
"step": 60
},
{
"epoch": 0.05679335954565312,
"grad_norm": 2.127289295196533,
"learning_rate": 0.00019995594523292178,
"loss": 0.5712,
"step": 65
},
{
"epoch": 0.06116207951070336,
"grad_norm": 3.3928685188293457,
"learning_rate": 0.00019994740999235359,
"loss": 0.5712,
"step": 70
},
{
"epoch": 0.0655307994757536,
"grad_norm": 2.6700279712677,
"learning_rate": 0.00019993811969684142,
"loss": 0.427,
"step": 75
},
{
"epoch": 0.06989951944080385,
"grad_norm": 2.6936633586883545,
"learning_rate": 0.00019992807441656898,
"loss": 0.5321,
"step": 80
},
{
"epoch": 0.07426823940585409,
"grad_norm": 3.9897687435150146,
"learning_rate": 0.00019991727422742362,
"loss": 0.6025,
"step": 85
},
{
"epoch": 0.07863695937090433,
"grad_norm": 2.3496663570404053,
"learning_rate": 0.00019990571921099553,
"loss": 0.5975,
"step": 90
},
{
"epoch": 0.08300567933595457,
"grad_norm": 3.3796467781066895,
"learning_rate": 0.0001998934094545774,
"loss": 0.5255,
"step": 95
},
{
"epoch": 0.08737439930100481,
"grad_norm": 3.1103007793426514,
"learning_rate": 0.00019988034505116352,
"loss": 0.4946,
"step": 100
},
{
"epoch": 0.09174311926605505,
"grad_norm": 2.002304792404175,
"learning_rate": 0.00019986652609944926,
"loss": 0.425,
"step": 105
},
{
"epoch": 0.09611183923110529,
"grad_norm": 1.7572168111801147,
"learning_rate": 0.00019985195270383018,
"loss": 0.6073,
"step": 110
},
{
"epoch": 0.10048055919615553,
"grad_norm": 2.745215654373169,
"learning_rate": 0.00019983662497440133,
"loss": 0.586,
"step": 115
},
{
"epoch": 0.10484927916120576,
"grad_norm": 1.8170915842056274,
"learning_rate": 0.0001998205430269564,
"loss": 0.5255,
"step": 120
},
{
"epoch": 0.109217999126256,
"grad_norm": 1.4944056272506714,
"learning_rate": 0.00019980370698298677,
"loss": 0.4219,
"step": 125
},
{
"epoch": 0.11358671909130624,
"grad_norm": 1.6616989374160767,
"learning_rate": 0.00019978611696968074,
"loss": 0.4231,
"step": 130
},
{
"epoch": 0.11795543905635648,
"grad_norm": 2.0523645877838135,
"learning_rate": 0.00019976777311992247,
"loss": 0.5298,
"step": 135
},
{
"epoch": 0.12232415902140673,
"grad_norm": 2.065765619277954,
"learning_rate": 0.00019974867557229098,
"loss": 0.5228,
"step": 140
},
{
"epoch": 0.12669287898645698,
"grad_norm": 1.7283438444137573,
"learning_rate": 0.00019972882447105912,
"loss": 0.3452,
"step": 145
},
{
"epoch": 0.1310615989515072,
"grad_norm": 2.655750274658203,
"learning_rate": 0.00019970821996619244,
"loss": 0.508,
"step": 150
},
{
"epoch": 0.13543031891655744,
"grad_norm": 2.67799973487854,
"learning_rate": 0.0001996868622133482,
"loss": 0.4359,
"step": 155
},
{
"epoch": 0.1397990388816077,
"grad_norm": 1.6298809051513672,
"learning_rate": 0.00019966475137387396,
"loss": 0.5447,
"step": 160
},
{
"epoch": 0.14416775884665792,
"grad_norm": 1.4772286415100098,
"learning_rate": 0.00019964188761480657,
"loss": 0.4105,
"step": 165
},
{
"epoch": 0.14853647881170817,
"grad_norm": 2.2986271381378174,
"learning_rate": 0.00019961827110887083,
"loss": 0.603,
"step": 170
},
{
"epoch": 0.1529051987767584,
"grad_norm": 2.8261911869049072,
"learning_rate": 0.00019959390203447817,
"loss": 0.4649,
"step": 175
},
{
"epoch": 0.15727391874180865,
"grad_norm": 1.7771011590957642,
"learning_rate": 0.00019956878057572524,
"loss": 0.4394,
"step": 180
},
{
"epoch": 0.16164263870685888,
"grad_norm": 1.7315421104431152,
"learning_rate": 0.00019954290692239274,
"loss": 0.5289,
"step": 185
},
{
"epoch": 0.16601135867190914,
"grad_norm": 1.6124423742294312,
"learning_rate": 0.00019951628126994373,
"loss": 0.4173,
"step": 190
},
{
"epoch": 0.17038007863695936,
"grad_norm": 1.792577862739563,
"learning_rate": 0.00019948890381952232,
"loss": 0.4331,
"step": 195
},
{
"epoch": 0.17474879860200962,
"grad_norm": 1.9038774967193604,
"learning_rate": 0.000199460774777952,
"loss": 0.4247,
"step": 200
},
{
"epoch": 0.17911751856705985,
"grad_norm": 2.457122802734375,
"learning_rate": 0.00019943189435773432,
"loss": 0.4519,
"step": 205
},
{
"epoch": 0.1834862385321101,
"grad_norm": 1.97683584690094,
"learning_rate": 0.00019940226277704706,
"loss": 0.4761,
"step": 210
},
{
"epoch": 0.18785495849716033,
"grad_norm": 2.1646862030029297,
"learning_rate": 0.0001993718802597426,
"loss": 0.5294,
"step": 215
},
{
"epoch": 0.19222367846221058,
"grad_norm": 1.565412998199463,
"learning_rate": 0.00019934074703534637,
"loss": 0.3999,
"step": 220
},
{
"epoch": 0.1965923984272608,
"grad_norm": 2.4315876960754395,
"learning_rate": 0.00019930886333905504,
"loss": 0.378,
"step": 225
},
{
"epoch": 0.20096111839231107,
"grad_norm": 2.7567529678344727,
"learning_rate": 0.00019927622941173467,
"loss": 0.5075,
"step": 230
},
{
"epoch": 0.2053298383573613,
"grad_norm": 1.8640387058258057,
"learning_rate": 0.00019924284549991902,
"loss": 0.4749,
"step": 235
},
{
"epoch": 0.20969855832241152,
"grad_norm": 2.090924024581909,
"learning_rate": 0.00019920871185580757,
"loss": 0.4353,
"step": 240
},
{
"epoch": 0.21406727828746178,
"grad_norm": 1.9691081047058105,
"learning_rate": 0.00019917382873726376,
"loss": 0.4051,
"step": 245
},
{
"epoch": 0.218435998252512,
"grad_norm": 1.8130213022232056,
"learning_rate": 0.0001991381964078128,
"loss": 0.526,
"step": 250
},
{
"epoch": 0.22280471821756226,
"grad_norm": 2.078805923461914,
"learning_rate": 0.00019910181513664,
"loss": 0.5654,
"step": 255
},
{
"epoch": 0.22717343818261249,
"grad_norm": 2.0686287879943848,
"learning_rate": 0.0001990646851985884,
"loss": 0.43,
"step": 260
},
{
"epoch": 0.23154215814766274,
"grad_norm": 1.475821614265442,
"learning_rate": 0.00019902680687415705,
"loss": 0.355,
"step": 265
},
{
"epoch": 0.23591087811271297,
"grad_norm": 1.901236891746521,
"learning_rate": 0.0001989881804494985,
"loss": 0.4522,
"step": 270
},
{
"epoch": 0.24027959807776322,
"grad_norm": 1.2583553791046143,
"learning_rate": 0.00019894880621641704,
"loss": 0.3869,
"step": 275
},
{
"epoch": 0.24464831804281345,
"grad_norm": 1.712336540222168,
"learning_rate": 0.00019890868447236613,
"loss": 0.454,
"step": 280
},
{
"epoch": 0.2490170380078637,
"grad_norm": 2.3967206478118896,
"learning_rate": 0.00019886781552044634,
"loss": 0.4074,
"step": 285
},
{
"epoch": 0.25338575797291396,
"grad_norm": 2.0578925609588623,
"learning_rate": 0.0001988261996694032,
"loss": 0.4268,
"step": 290
},
{
"epoch": 0.2577544779379642,
"grad_norm": 1.7411088943481445,
"learning_rate": 0.0001987838372336245,
"loss": 0.334,
"step": 295
},
{
"epoch": 0.2621231979030144,
"grad_norm": 1.8145533800125122,
"learning_rate": 0.0001987407285331382,
"loss": 0.4019,
"step": 300
},
{
"epoch": 0.26649191786806464,
"grad_norm": 1.3501653671264648,
"learning_rate": 0.00019869687389361,
"loss": 0.32,
"step": 305
},
{
"epoch": 0.27086063783311487,
"grad_norm": 1.208422303199768,
"learning_rate": 0.00019865227364634073,
"loss": 0.4548,
"step": 310
},
{
"epoch": 0.27522935779816515,
"grad_norm": 1.521690011024475,
"learning_rate": 0.00019860692812826396,
"loss": 0.3572,
"step": 315
},
{
"epoch": 0.2795980777632154,
"grad_norm": 2.2849714756011963,
"learning_rate": 0.0001985608376819434,
"loss": 0.4555,
"step": 320
},
{
"epoch": 0.2839667977282656,
"grad_norm": 2.7733798027038574,
"learning_rate": 0.00019851400265557037,
"loss": 0.4726,
"step": 325
},
{
"epoch": 0.28833551769331583,
"grad_norm": 1.973522424697876,
"learning_rate": 0.00019846642340296114,
"loss": 0.4585,
"step": 330
},
{
"epoch": 0.2927042376583661,
"grad_norm": 1.7133642435073853,
"learning_rate": 0.0001984181002835542,
"loss": 0.4679,
"step": 335
},
{
"epoch": 0.29707295762341634,
"grad_norm": 2.8383235931396484,
"learning_rate": 0.00019836903366240768,
"loss": 0.4119,
"step": 340
},
{
"epoch": 0.30144167758846657,
"grad_norm": 2.798276901245117,
"learning_rate": 0.00019831922391019645,
"loss": 0.3665,
"step": 345
},
{
"epoch": 0.3058103975535168,
"grad_norm": 2.171276569366455,
"learning_rate": 0.00019826867140320938,
"loss": 0.5691,
"step": 350
},
{
"epoch": 0.3101791175185671,
"grad_norm": 2.0866177082061768,
"learning_rate": 0.00019821737652334653,
"loss": 0.4074,
"step": 355
},
{
"epoch": 0.3145478374836173,
"grad_norm": 1.3713918924331665,
"learning_rate": 0.0001981653396581162,
"loss": 0.3379,
"step": 360
},
{
"epoch": 0.31891655744866754,
"grad_norm": 1.6086684465408325,
"learning_rate": 0.0001981125612006321,
"loss": 0.3563,
"step": 365
},
{
"epoch": 0.32328527741371776,
"grad_norm": 2.655686378479004,
"learning_rate": 0.0001980590415496102,
"loss": 0.3988,
"step": 370
},
{
"epoch": 0.32765399737876805,
"grad_norm": 1.5271559953689575,
"learning_rate": 0.00019800478110936596,
"loss": 0.5784,
"step": 375
},
{
"epoch": 0.3320227173438183,
"grad_norm": 1.3043195009231567,
"learning_rate": 0.00019794978028981106,
"loss": 0.2637,
"step": 380
},
{
"epoch": 0.3363914373088685,
"grad_norm": 2.539109706878662,
"learning_rate": 0.0001978940395064504,
"loss": 0.4658,
"step": 385
},
{
"epoch": 0.34076015727391873,
"grad_norm": 1.7521268129348755,
"learning_rate": 0.00019783755918037903,
"loss": 0.4253,
"step": 390
},
{
"epoch": 0.34512887723896896,
"grad_norm": 1.5679692029953003,
"learning_rate": 0.00019778033973827882,
"loss": 0.4528,
"step": 395
},
{
"epoch": 0.34949759720401924,
"grad_norm": 1.670640468597412,
"learning_rate": 0.00019772238161241528,
"loss": 0.3724,
"step": 400
},
{
"epoch": 0.35386631716906947,
"grad_norm": 1.520856261253357,
"learning_rate": 0.00019766368524063438,
"loss": 0.4141,
"step": 405
},
{
"epoch": 0.3582350371341197,
"grad_norm": 1.0802158117294312,
"learning_rate": 0.00019760425106635926,
"loss": 0.3268,
"step": 410
},
{
"epoch": 0.3626037570991699,
"grad_norm": 1.7306379079818726,
"learning_rate": 0.0001975440795385866,
"loss": 0.3654,
"step": 415
},
{
"epoch": 0.3669724770642202,
"grad_norm": 1.5037274360656738,
"learning_rate": 0.0001974831711118836,
"loss": 0.4285,
"step": 420
},
{
"epoch": 0.37134119702927043,
"grad_norm": 1.4654844999313354,
"learning_rate": 0.00019742152624638437,
"loss": 0.2548,
"step": 425
},
{
"epoch": 0.37570991699432066,
"grad_norm": 2.6770753860473633,
"learning_rate": 0.00019735914540778638,
"loss": 0.4238,
"step": 430
},
{
"epoch": 0.3800786369593709,
"grad_norm": 1.1864055395126343,
"learning_rate": 0.00019729602906734704,
"loss": 0.3959,
"step": 435
},
{
"epoch": 0.38444735692442117,
"grad_norm": 1.904876708984375,
"learning_rate": 0.00019723217770188024,
"loss": 0.3603,
"step": 440
},
{
"epoch": 0.3888160768894714,
"grad_norm": 1.7086598873138428,
"learning_rate": 0.0001971675917937525,
"loss": 0.551,
"step": 445
},
{
"epoch": 0.3931847968545216,
"grad_norm": 1.4635995626449585,
"learning_rate": 0.00019710227183087947,
"loss": 0.3738,
"step": 450
},
{
"epoch": 0.39755351681957185,
"grad_norm": 1.6047295331954956,
"learning_rate": 0.00019703621830672238,
"loss": 0.475,
"step": 455
},
{
"epoch": 0.40192223678462213,
"grad_norm": 1.4741933345794678,
"learning_rate": 0.00019696943172028394,
"loss": 0.4021,
"step": 460
},
{
"epoch": 0.40629095674967236,
"grad_norm": 2.8138020038604736,
"learning_rate": 0.00019690191257610497,
"loss": 0.3665,
"step": 465
},
{
"epoch": 0.4106596767147226,
"grad_norm": 1.6264874935150146,
"learning_rate": 0.00019683366138426034,
"loss": 0.3598,
"step": 470
},
{
"epoch": 0.4150283966797728,
"grad_norm": 1.6185061931610107,
"learning_rate": 0.00019676467866035525,
"loss": 0.5003,
"step": 475
},
{
"epoch": 0.41939711664482304,
"grad_norm": 1.8654040098190308,
"learning_rate": 0.00019669496492552113,
"loss": 0.397,
"step": 480
},
{
"epoch": 0.4237658366098733,
"grad_norm": 1.2525237798690796,
"learning_rate": 0.00019662452070641205,
"loss": 0.3235,
"step": 485
},
{
"epoch": 0.42813455657492355,
"grad_norm": 1.7755401134490967,
"learning_rate": 0.00019655334653520036,
"loss": 0.2978,
"step": 490
},
{
"epoch": 0.4325032765399738,
"grad_norm": 1.6025470495224,
"learning_rate": 0.00019648144294957297,
"loss": 0.4436,
"step": 495
},
{
"epoch": 0.436871996505024,
"grad_norm": 1.085461974143982,
"learning_rate": 0.00019640881049272713,
"loss": 0.22,
"step": 500
},
{
"epoch": 0.4412407164700743,
"grad_norm": 1.491818904876709,
"learning_rate": 0.00019633544971336636,
"loss": 0.2714,
"step": 505
},
{
"epoch": 0.4456094364351245,
"grad_norm": 0.9479840993881226,
"learning_rate": 0.0001962613611656963,
"loss": 0.3735,
"step": 510
},
{
"epoch": 0.44997815640017474,
"grad_norm": 3.0529448986053467,
"learning_rate": 0.0001961865454094205,
"loss": 0.4779,
"step": 515
},
{
"epoch": 0.45434687636522497,
"grad_norm": 2.831089973449707,
"learning_rate": 0.00019611100300973635,
"loss": 0.469,
"step": 520
},
{
"epoch": 0.45871559633027525,
"grad_norm": 2.1834311485290527,
"learning_rate": 0.00019603473453733052,
"loss": 0.4163,
"step": 525
},
{
"epoch": 0.4630843162953255,
"grad_norm": 1.3152204751968384,
"learning_rate": 0.00019595774056837493,
"loss": 0.3744,
"step": 530
},
{
"epoch": 0.4674530362603757,
"grad_norm": 1.4493387937545776,
"learning_rate": 0.00019588002168452223,
"loss": 0.3117,
"step": 535
},
{
"epoch": 0.47182175622542594,
"grad_norm": 1.1412076950073242,
"learning_rate": 0.00019580157847290147,
"loss": 0.3152,
"step": 540
},
{
"epoch": 0.47619047619047616,
"grad_norm": 1.5004645586013794,
"learning_rate": 0.00019572241152611365,
"loss": 0.3271,
"step": 545
},
{
"epoch": 0.48055919615552645,
"grad_norm": 2.3333992958068848,
"learning_rate": 0.0001956425214422272,
"loss": 0.3626,
"step": 550
},
{
"epoch": 0.4849279161205767,
"grad_norm": 1.5423107147216797,
"learning_rate": 0.0001955619088247736,
"loss": 0.4588,
"step": 555
},
{
"epoch": 0.4892966360856269,
"grad_norm": 3.008280038833618,
"learning_rate": 0.00019548057428274266,
"loss": 0.5275,
"step": 560
},
{
"epoch": 0.49366535605067713,
"grad_norm": 1.0968583822250366,
"learning_rate": 0.00019539851843057798,
"loss": 0.3233,
"step": 565
},
{
"epoch": 0.4980340760157274,
"grad_norm": 1.265228271484375,
"learning_rate": 0.00019531574188817234,
"loss": 0.2743,
"step": 570
},
{
"epoch": 0.5024027959807776,
"grad_norm": 1.9382916688919067,
"learning_rate": 0.000195232245280863,
"loss": 0.3189,
"step": 575
},
{
"epoch": 0.5067715159458279,
"grad_norm": 1.6710058450698853,
"learning_rate": 0.00019514802923942687,
"loss": 0.345,
"step": 580
},
{
"epoch": 0.5111402359108781,
"grad_norm": 1.8377633094787598,
"learning_rate": 0.000195063094400076,
"loss": 0.4441,
"step": 585
},
{
"epoch": 0.5155089558759284,
"grad_norm": 1.432173728942871,
"learning_rate": 0.0001949774414044525,
"loss": 0.3277,
"step": 590
},
{
"epoch": 0.5198776758409785,
"grad_norm": 1.096330165863037,
"learning_rate": 0.0001948910708996239,
"loss": 0.3821,
"step": 595
},
{
"epoch": 0.5242463958060288,
"grad_norm": 1.1951391696929932,
"learning_rate": 0.00019480398353807798,
"loss": 0.4303,
"step": 600
},
{
"epoch": 0.5286151157710791,
"grad_norm": 0.9764880537986755,
"learning_rate": 0.0001947161799777183,
"loss": 0.2693,
"step": 605
},
{
"epoch": 0.5329838357361293,
"grad_norm": 1.2566354274749756,
"learning_rate": 0.00019462766088185874,
"loss": 0.2851,
"step": 610
},
{
"epoch": 0.5373525557011796,
"grad_norm": 1.494903802871704,
"learning_rate": 0.0001945384269192188,
"loss": 0.36,
"step": 615
},
{
"epoch": 0.5417212756662297,
"grad_norm": 1.5508995056152344,
"learning_rate": 0.00019444847876391844,
"loss": 0.3682,
"step": 620
},
{
"epoch": 0.54608999563128,
"grad_norm": 2.227889060974121,
"learning_rate": 0.00019435781709547305,
"loss": 0.3889,
"step": 625
},
{
"epoch": 0.5504587155963303,
"grad_norm": 0.9221494197845459,
"learning_rate": 0.0001942664425987882,
"loss": 0.3375,
"step": 630
},
{
"epoch": 0.5548274355613805,
"grad_norm": 1.3386973142623901,
"learning_rate": 0.00019417435596415458,
"loss": 0.4833,
"step": 635
},
{
"epoch": 0.5591961555264308,
"grad_norm": 1.9686752557754517,
"learning_rate": 0.00019408155788724272,
"loss": 0.4739,
"step": 640
},
{
"epoch": 0.563564875491481,
"grad_norm": 2.3978073596954346,
"learning_rate": 0.00019398804906909777,
"loss": 0.4681,
"step": 645
},
{
"epoch": 0.5679335954565312,
"grad_norm": 1.536699652671814,
"learning_rate": 0.0001938938302161342,
"loss": 0.2684,
"step": 650
},
{
"epoch": 0.5723023154215815,
"grad_norm": 1.691787600517273,
"learning_rate": 0.00019379890204013043,
"loss": 0.3512,
"step": 655
},
{
"epoch": 0.5766710353866317,
"grad_norm": 1.7557870149612427,
"learning_rate": 0.0001937032652582235,
"loss": 0.3423,
"step": 660
},
{
"epoch": 0.581039755351682,
"grad_norm": 1.7950220108032227,
"learning_rate": 0.0001936069205929036,
"loss": 0.2831,
"step": 665
},
{
"epoch": 0.5854084753167322,
"grad_norm": 1.928232192993164,
"learning_rate": 0.00019350986877200867,
"loss": 0.323,
"step": 670
},
{
"epoch": 0.5897771952817824,
"grad_norm": 1.86429762840271,
"learning_rate": 0.00019341211052871887,
"loss": 0.4248,
"step": 675
},
{
"epoch": 0.5941459152468327,
"grad_norm": 2.022738456726074,
"learning_rate": 0.00019331364660155103,
"loss": 0.3411,
"step": 680
},
{
"epoch": 0.598514635211883,
"grad_norm": 1.2337995767593384,
"learning_rate": 0.00019321447773435306,
"loss": 0.2368,
"step": 685
},
{
"epoch": 0.6028833551769331,
"grad_norm": 2.015075445175171,
"learning_rate": 0.00019311460467629843,
"loss": 0.5116,
"step": 690
},
{
"epoch": 0.6072520751419834,
"grad_norm": 1.2344030141830444,
"learning_rate": 0.00019301402818188036,
"loss": 0.3313,
"step": 695
},
{
"epoch": 0.6116207951070336,
"grad_norm": 1.129764437675476,
"learning_rate": 0.00019291274901090625,
"loss": 0.408,
"step": 700
},
{
"epoch": 0.6159895150720839,
"grad_norm": 1.4350385665893555,
"learning_rate": 0.00019281076792849184,
"loss": 0.3729,
"step": 705
},
{
"epoch": 0.6203582350371342,
"grad_norm": 1.9586119651794434,
"learning_rate": 0.00019270808570505553,
"loss": 0.4315,
"step": 710
},
{
"epoch": 0.6247269550021843,
"grad_norm": 1.0157238245010376,
"learning_rate": 0.00019260470311631243,
"loss": 0.2861,
"step": 715
},
{
"epoch": 0.6290956749672346,
"grad_norm": 1.3841652870178223,
"learning_rate": 0.00019250062094326864,
"loss": 0.4037,
"step": 720
},
{
"epoch": 0.6334643949322848,
"grad_norm": 1.848821997642517,
"learning_rate": 0.00019239583997221525,
"loss": 0.3665,
"step": 725
},
{
"epoch": 0.6378331148973351,
"grad_norm": 0.9416481256484985,
"learning_rate": 0.0001922903609947225,
"loss": 0.339,
"step": 730
},
{
"epoch": 0.6422018348623854,
"grad_norm": 1.0696804523468018,
"learning_rate": 0.0001921841848076336,
"loss": 0.2783,
"step": 735
},
{
"epoch": 0.6465705548274355,
"grad_norm": 1.9199622869491577,
"learning_rate": 0.00019207731221305903,
"loss": 0.2904,
"step": 740
},
{
"epoch": 0.6509392747924858,
"grad_norm": 1.347430944442749,
"learning_rate": 0.00019196974401837008,
"loss": 0.2719,
"step": 745
},
{
"epoch": 0.6553079947575361,
"grad_norm": 0.9743670225143433,
"learning_rate": 0.0001918614810361932,
"loss": 0.2748,
"step": 750
},
{
"epoch": 0.6596767147225863,
"grad_norm": 1.4043099880218506,
"learning_rate": 0.00019175252408440343,
"loss": 0.3285,
"step": 755
},
{
"epoch": 0.6640454346876365,
"grad_norm": 2.9343338012695312,
"learning_rate": 0.0001916428739861185,
"loss": 0.4962,
"step": 760
},
{
"epoch": 0.6684141546526867,
"grad_norm": 2.3201515674591064,
"learning_rate": 0.0001915325315696926,
"loss": 0.3243,
"step": 765
},
{
"epoch": 0.672782874617737,
"grad_norm": 1.675564169883728,
"learning_rate": 0.00019142149766870992,
"loss": 0.4596,
"step": 770
},
{
"epoch": 0.6771515945827873,
"grad_norm": 1.664604663848877,
"learning_rate": 0.00019130977312197854,
"loss": 0.3024,
"step": 775
},
{
"epoch": 0.6815203145478375,
"grad_norm": 1.8358148336410522,
"learning_rate": 0.00019119735877352412,
"loss": 0.3862,
"step": 780
},
{
"epoch": 0.6858890345128877,
"grad_norm": 1.3632128238677979,
"learning_rate": 0.00019108425547258328,
"loss": 0.2374,
"step": 785
},
{
"epoch": 0.6902577544779379,
"grad_norm": 2.0279934406280518,
"learning_rate": 0.0001909704640735975,
"loss": 0.4392,
"step": 790
},
{
"epoch": 0.6946264744429882,
"grad_norm": 1.2824902534484863,
"learning_rate": 0.0001908559854362064,
"loss": 0.2782,
"step": 795
},
{
"epoch": 0.6989951944080385,
"grad_norm": 1.3477047681808472,
"learning_rate": 0.00019074082042524145,
"loss": 0.3631,
"step": 800
},
{
"epoch": 0.7033639143730887,
"grad_norm": 1.8478046655654907,
"learning_rate": 0.00019062496991071928,
"loss": 0.3788,
"step": 805
},
{
"epoch": 0.7077326343381389,
"grad_norm": 1.470382571220398,
"learning_rate": 0.0001905084347678352,
"loss": 0.3825,
"step": 810
},
{
"epoch": 0.7121013543031892,
"grad_norm": 2.4951813220977783,
"learning_rate": 0.00019039121587695652,
"loss": 0.3359,
"step": 815
},
{
"epoch": 0.7164700742682394,
"grad_norm": 2.3441359996795654,
"learning_rate": 0.000190273314123616,
"loss": 0.32,
"step": 820
},
{
"epoch": 0.7208387942332897,
"grad_norm": 2.372884750366211,
"learning_rate": 0.00019015473039850513,
"loss": 0.3651,
"step": 825
},
{
"epoch": 0.7252075141983398,
"grad_norm": 2.4474101066589355,
"learning_rate": 0.0001900354655974672,
"loss": 0.4401,
"step": 830
},
{
"epoch": 0.7295762341633901,
"grad_norm": 1.4031054973602295,
"learning_rate": 0.0001899155206214909,
"loss": 0.308,
"step": 835
},
{
"epoch": 0.7339449541284404,
"grad_norm": 1.6008141040802002,
"learning_rate": 0.00018979489637670322,
"loss": 0.2937,
"step": 840
},
{
"epoch": 0.7383136740934906,
"grad_norm": 0.9202178120613098,
"learning_rate": 0.0001896735937743627,
"loss": 0.3157,
"step": 845
},
{
"epoch": 0.7426823940585409,
"grad_norm": 1.024746298789978,
"learning_rate": 0.00018955161373085253,
"loss": 0.2934,
"step": 850
},
{
"epoch": 0.747051114023591,
"grad_norm": 1.1573566198349,
"learning_rate": 0.00018942895716767374,
"loss": 0.3617,
"step": 855
},
{
"epoch": 0.7514198339886413,
"grad_norm": 1.227409839630127,
"learning_rate": 0.00018930562501143805,
"loss": 0.3581,
"step": 860
},
{
"epoch": 0.7557885539536916,
"grad_norm": 1.5460100173950195,
"learning_rate": 0.00018918161819386095,
"loss": 0.3393,
"step": 865
},
{
"epoch": 0.7601572739187418,
"grad_norm": 1.688852310180664,
"learning_rate": 0.0001890569376517548,
"loss": 0.4389,
"step": 870
},
{
"epoch": 0.764525993883792,
"grad_norm": 1.5271598100662231,
"learning_rate": 0.00018893158432702149,
"loss": 0.2915,
"step": 875
},
{
"epoch": 0.7688947138488423,
"grad_norm": 1.695788860321045,
"learning_rate": 0.00018880555916664555,
"loss": 0.4026,
"step": 880
},
{
"epoch": 0.7732634338138925,
"grad_norm": 1.6879792213439941,
"learning_rate": 0.00018867886312268683,
"loss": 0.2857,
"step": 885
},
{
"epoch": 0.7776321537789428,
"grad_norm": 2.0718719959259033,
"learning_rate": 0.00018855149715227344,
"loss": 0.4236,
"step": 890
},
{
"epoch": 0.782000873743993,
"grad_norm": 1.5112775564193726,
"learning_rate": 0.00018842346221759448,
"loss": 0.325,
"step": 895
},
{
"epoch": 0.7863695937090432,
"grad_norm": 1.2844749689102173,
"learning_rate": 0.00018829475928589271,
"loss": 0.3782,
"step": 900
},
{
"epoch": 0.7907383136740935,
"grad_norm": 2.150299072265625,
"learning_rate": 0.00018816538932945728,
"loss": 0.3726,
"step": 905
},
{
"epoch": 0.7951070336391437,
"grad_norm": 1.7050650119781494,
"learning_rate": 0.00018803535332561646,
"loss": 0.3824,
"step": 910
},
{
"epoch": 0.799475753604194,
"grad_norm": 1.8164982795715332,
"learning_rate": 0.00018790465225673012,
"loss": 0.3664,
"step": 915
},
{
"epoch": 0.8038444735692443,
"grad_norm": 1.1102941036224365,
"learning_rate": 0.00018777328711018244,
"loss": 0.3166,
"step": 920
},
{
"epoch": 0.8082131935342944,
"grad_norm": 1.4220764636993408,
"learning_rate": 0.0001876412588783743,
"loss": 0.3049,
"step": 925
},
{
"epoch": 0.8125819134993447,
"grad_norm": 2.11336088180542,
"learning_rate": 0.000187508568558716,
"loss": 0.3076,
"step": 930
},
{
"epoch": 0.8169506334643949,
"grad_norm": 1.9948710203170776,
"learning_rate": 0.00018737521715361948,
"loss": 0.3846,
"step": 935
},
{
"epoch": 0.8213193534294452,
"grad_norm": 1.8913676738739014,
"learning_rate": 0.00018724120567049094,
"loss": 0.4296,
"step": 940
},
{
"epoch": 0.8256880733944955,
"grad_norm": 1.3633447885513306,
"learning_rate": 0.0001871065351217231,
"loss": 0.3569,
"step": 945
},
{
"epoch": 0.8300567933595456,
"grad_norm": 1.4957417249679565,
"learning_rate": 0.00018697120652468762,
"loss": 0.3085,
"step": 950
},
{
"epoch": 0.8344255133245959,
"grad_norm": 2.076399803161621,
"learning_rate": 0.0001868352209017275,
"loss": 0.3331,
"step": 955
},
{
"epoch": 0.8387942332896461,
"grad_norm": 1.1817855834960938,
"learning_rate": 0.00018669857928014906,
"loss": 0.3414,
"step": 960
},
{
"epoch": 0.8431629532546964,
"grad_norm": 1.4255414009094238,
"learning_rate": 0.00018656128269221454,
"loss": 0.2782,
"step": 965
},
{
"epoch": 0.8475316732197467,
"grad_norm": 1.326687216758728,
"learning_rate": 0.0001864233321751341,
"loss": 0.2998,
"step": 970
},
{
"epoch": 0.8519003931847968,
"grad_norm": 2.222280263900757,
"learning_rate": 0.00018628472877105793,
"loss": 0.3348,
"step": 975
},
{
"epoch": 0.8562691131498471,
"grad_norm": 1.518401026725769,
"learning_rate": 0.00018614547352706863,
"loss": 0.3816,
"step": 980
},
{
"epoch": 0.8606378331148974,
"grad_norm": 1.1030207872390747,
"learning_rate": 0.00018600556749517305,
"loss": 0.3222,
"step": 985
},
{
"epoch": 0.8650065530799476,
"grad_norm": 2.406994104385376,
"learning_rate": 0.00018586501173229437,
"loss": 0.3754,
"step": 990
},
{
"epoch": 0.8693752730449978,
"grad_norm": 1.2401646375656128,
"learning_rate": 0.00018572380730026434,
"loss": 0.4402,
"step": 995
},
{
"epoch": 0.873743993010048,
"grad_norm": 2.0233402252197266,
"learning_rate": 0.0001855819552658149,
"loss": 0.3323,
"step": 1000
},
{
"epoch": 0.8781127129750983,
"grad_norm": 1.5329450368881226,
"learning_rate": 0.00018543945670057045,
"loss": 0.235,
"step": 1005
},
{
"epoch": 0.8824814329401486,
"grad_norm": 1.8849459886550903,
"learning_rate": 0.00018529631268103964,
"loss": 0.357,
"step": 1010
},
{
"epoch": 0.8868501529051988,
"grad_norm": 2.016646146774292,
"learning_rate": 0.0001851525242886071,
"loss": 0.2663,
"step": 1015
},
{
"epoch": 0.891218872870249,
"grad_norm": 2.3272440433502197,
"learning_rate": 0.0001850080926095255,
"loss": 0.2926,
"step": 1020
},
{
"epoch": 0.8955875928352992,
"grad_norm": 1.7760261297225952,
"learning_rate": 0.00018486301873490713,
"loss": 0.4155,
"step": 1025
},
{
"epoch": 0.8999563128003495,
"grad_norm": 1.4679979085922241,
"learning_rate": 0.0001847173037607159,
"loss": 0.2877,
"step": 1030
},
{
"epoch": 0.9043250327653998,
"grad_norm": 1.8398054838180542,
"learning_rate": 0.0001845709487877588,
"loss": 0.2856,
"step": 1035
},
{
"epoch": 0.9086937527304499,
"grad_norm": 3.05880069732666,
"learning_rate": 0.00018442395492167775,
"loss": 0.3373,
"step": 1040
},
{
"epoch": 0.9130624726955002,
"grad_norm": 1.2527328729629517,
"learning_rate": 0.0001842763232729412,
"loss": 0.2412,
"step": 1045
},
{
"epoch": 0.9174311926605505,
"grad_norm": 1.7745814323425293,
"learning_rate": 0.00018412805495683575,
"loss": 0.3955,
"step": 1050
},
{
"epoch": 0.9217999126256007,
"grad_norm": 3.2864468097686768,
"learning_rate": 0.0001839791510934577,
"loss": 0.333,
"step": 1055
},
{
"epoch": 0.926168632590651,
"grad_norm": 2.0274927616119385,
"learning_rate": 0.0001838296128077046,
"loss": 0.4004,
"step": 1060
},
{
"epoch": 0.9305373525557011,
"grad_norm": 1.9851633310317993,
"learning_rate": 0.0001836794412292668,
"loss": 0.3132,
"step": 1065
},
{
"epoch": 0.9349060725207514,
"grad_norm": 1.3309999704360962,
"learning_rate": 0.00018352863749261883,
"loss": 0.2645,
"step": 1070
},
{
"epoch": 0.9392747924858017,
"grad_norm": 2.0173072814941406,
"learning_rate": 0.00018337720273701088,
"loss": 0.4376,
"step": 1075
},
{
"epoch": 0.9436435124508519,
"grad_norm": 1.815408706665039,
"learning_rate": 0.00018322513810646024,
"loss": 0.2851,
"step": 1080
},
{
"epoch": 0.9480122324159022,
"grad_norm": 1.1190584897994995,
"learning_rate": 0.00018307244474974254,
"loss": 0.4664,
"step": 1085
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.9746566414833069,
"learning_rate": 0.00018291912382038317,
"loss": 0.3816,
"step": 1090
},
{
"epoch": 0.9567496723460026,
"grad_norm": 1.9062715768814087,
"learning_rate": 0.0001827651764766485,
"loss": 0.3031,
"step": 1095
},
{
"epoch": 0.9611183923110529,
"grad_norm": 1.027502417564392,
"learning_rate": 0.00018261060388153718,
"loss": 0.2657,
"step": 1100
},
{
"epoch": 0.9654871122761031,
"grad_norm": 2.239164352416992,
"learning_rate": 0.00018245540720277135,
"loss": 0.3367,
"step": 1105
},
{
"epoch": 0.9698558322411533,
"grad_norm": 1.5922635793685913,
"learning_rate": 0.0001822995876127878,
"loss": 0.3044,
"step": 1110
},
{
"epoch": 0.9742245522062036,
"grad_norm": 1.9189236164093018,
"learning_rate": 0.00018214314628872905,
"loss": 0.3326,
"step": 1115
},
{
"epoch": 0.9785932721712538,
"grad_norm": 1.1626375913619995,
"learning_rate": 0.00018198608441243467,
"loss": 0.2761,
"step": 1120
},
{
"epoch": 0.9829619921363041,
"grad_norm": 1.805367112159729,
"learning_rate": 0.00018182840317043202,
"loss": 0.3337,
"step": 1125
},
{
"epoch": 0.9873307121013543,
"grad_norm": 1.5879418849945068,
"learning_rate": 0.0001816701037539277,
"loss": 0.3242,
"step": 1130
},
{
"epoch": 0.9916994320664045,
"grad_norm": 1.3560898303985596,
"learning_rate": 0.00018151118735879805,
"loss": 0.2794,
"step": 1135
},
{
"epoch": 0.9960681520314548,
"grad_norm": 1.0656763315200806,
"learning_rate": 0.0001813516551855806,
"loss": 0.3336,
"step": 1140
},
{
"epoch": 1.0,
"grad_norm": 3.2105913162231445,
"learning_rate": 0.00018119150843946472,
"loss": 0.3753,
"step": 1145
},
{
"epoch": 1.0043687199650502,
"grad_norm": 1.2890548706054688,
"learning_rate": 0.00018103074833028258,
"loss": 0.2943,
"step": 1150
},
{
"epoch": 1.0087374399301006,
"grad_norm": 1.8480075597763062,
"learning_rate": 0.00018086937607250002,
"loss": 0.3057,
"step": 1155
},
{
"epoch": 1.0131061598951507,
"grad_norm": 1.3526337146759033,
"learning_rate": 0.00018070739288520736,
"loss": 0.2328,
"step": 1160
},
{
"epoch": 1.017474879860201,
"grad_norm": 1.0462696552276611,
"learning_rate": 0.00018054479999211025,
"loss": 0.1896,
"step": 1165
},
{
"epoch": 1.0218435998252513,
"grad_norm": 0.9586630463600159,
"learning_rate": 0.00018038159862152027,
"loss": 0.2567,
"step": 1170
},
{
"epoch": 1.0262123197903015,
"grad_norm": 1.0181586742401123,
"learning_rate": 0.0001802177900063459,
"loss": 0.2653,
"step": 1175
},
{
"epoch": 1.0305810397553516,
"grad_norm": 1.2034084796905518,
"learning_rate": 0.0001800533753840829,
"loss": 0.3953,
"step": 1180
},
{
"epoch": 1.0349497597204018,
"grad_norm": 2.6563191413879395,
"learning_rate": 0.0001798883559968053,
"loss": 0.185,
"step": 1185
},
{
"epoch": 1.0393184796854522,
"grad_norm": 1.2556034326553345,
"learning_rate": 0.00017972273309115568,
"loss": 0.2452,
"step": 1190
},
{
"epoch": 1.0436871996505024,
"grad_norm": 1.4751702547073364,
"learning_rate": 0.00017955650791833604,
"loss": 0.2827,
"step": 1195
},
{
"epoch": 1.0480559196155526,
"grad_norm": 3.8620717525482178,
"learning_rate": 0.00017938968173409811,
"loss": 0.2953,
"step": 1200
},
{
"epoch": 1.052424639580603,
"grad_norm": 1.2123383283615112,
"learning_rate": 0.00017922225579873407,
"loss": 0.2165,
"step": 1205
},
{
"epoch": 1.0567933595456531,
"grad_norm": 1.911566972732544,
"learning_rate": 0.0001790542313770669,
"loss": 0.2444,
"step": 1210
},
{
"epoch": 1.0611620795107033,
"grad_norm": 1.9949162006378174,
"learning_rate": 0.00017888560973844083,
"loss": 0.255,
"step": 1215
},
{
"epoch": 1.0655307994757537,
"grad_norm": 0.9666941165924072,
"learning_rate": 0.0001787163921567118,
"loss": 0.1913,
"step": 1220
},
{
"epoch": 1.0698995194408039,
"grad_norm": 0.7195447087287903,
"learning_rate": 0.0001785465799102378,
"loss": 0.2541,
"step": 1225
},
{
"epoch": 1.074268239405854,
"grad_norm": 1.5414377450942993,
"learning_rate": 0.0001783761742818693,
"loss": 0.3626,
"step": 1230
},
{
"epoch": 1.0786369593709044,
"grad_norm": 1.2173676490783691,
"learning_rate": 0.0001782051765589394,
"loss": 0.2913,
"step": 1235
},
{
"epoch": 1.0830056793359546,
"grad_norm": 1.6966580152511597,
"learning_rate": 0.00017803358803325416,
"loss": 0.2613,
"step": 1240
},
{
"epoch": 1.0873743993010048,
"grad_norm": 1.8033946752548218,
"learning_rate": 0.00017786141000108302,
"loss": 0.2734,
"step": 1245
},
{
"epoch": 1.091743119266055,
"grad_norm": 1.1834598779678345,
"learning_rate": 0.00017768864376314873,
"loss": 0.2548,
"step": 1250
},
{
"epoch": 1.0961118392311053,
"grad_norm": 1.441835641860962,
"learning_rate": 0.00017751529062461777,
"loss": 0.3404,
"step": 1255
},
{
"epoch": 1.1004805591961555,
"grad_norm": 1.443575382232666,
"learning_rate": 0.0001773413518950902,
"loss": 0.312,
"step": 1260
},
{
"epoch": 1.1048492791612057,
"grad_norm": 1.2344982624053955,
"learning_rate": 0.0001771668288885901,
"loss": 0.2594,
"step": 1265
},
{
"epoch": 1.109217999126256,
"grad_norm": 1.3508884906768799,
"learning_rate": 0.0001769917229235554,
"loss": 0.3467,
"step": 1270
},
{
"epoch": 1.1135867190913062,
"grad_norm": 1.3105831146240234,
"learning_rate": 0.00017681603532282805,
"loss": 0.2393,
"step": 1275
},
{
"epoch": 1.1179554390563564,
"grad_norm": 1.3476370573043823,
"learning_rate": 0.00017663976741364394,
"loss": 0.3318,
"step": 1280
},
{
"epoch": 1.1223241590214068,
"grad_norm": 1.9715685844421387,
"learning_rate": 0.00017646292052762296,
"loss": 0.2808,
"step": 1285
},
{
"epoch": 1.126692878986457,
"grad_norm": 1.2339669466018677,
"learning_rate": 0.00017628549600075884,
"loss": 0.2753,
"step": 1290
},
{
"epoch": 1.1310615989515072,
"grad_norm": 1.3921184539794922,
"learning_rate": 0.00017610749517340914,
"loss": 0.2096,
"step": 1295
},
{
"epoch": 1.1354303189165575,
"grad_norm": 1.3537594079971313,
"learning_rate": 0.0001759289193902851,
"loss": 0.2232,
"step": 1300
},
{
"epoch": 1.1397990388816077,
"grad_norm": 2.207932472229004,
"learning_rate": 0.00017574977000044147,
"loss": 0.4179,
"step": 1305
},
{
"epoch": 1.144167758846658,
"grad_norm": 2.4464988708496094,
"learning_rate": 0.0001755700483572663,
"loss": 0.3863,
"step": 1310
},
{
"epoch": 1.148536478811708,
"grad_norm": 1.2169779539108276,
"learning_rate": 0.00017538975581847077,
"loss": 0.2131,
"step": 1315
},
{
"epoch": 1.1529051987767585,
"grad_norm": 1.2162644863128662,
"learning_rate": 0.00017520889374607893,
"loss": 0.2299,
"step": 1320
},
{
"epoch": 1.1572739187418086,
"grad_norm": 1.7051069736480713,
"learning_rate": 0.0001750274635064173,
"loss": 0.2994,
"step": 1325
},
{
"epoch": 1.1616426387068588,
"grad_norm": 1.814450740814209,
"learning_rate": 0.00017484546647010473,
"loss": 0.2948,
"step": 1330
},
{
"epoch": 1.1660113586719092,
"grad_norm": 1.3929287195205688,
"learning_rate": 0.00017466290401204186,
"loss": 0.3837,
"step": 1335
},
{
"epoch": 1.1703800786369594,
"grad_norm": 1.1973505020141602,
"learning_rate": 0.00017447977751140086,
"loss": 0.2335,
"step": 1340
},
{
"epoch": 1.1747487986020095,
"grad_norm": 1.4987421035766602,
"learning_rate": 0.00017429608835161506,
"loss": 0.2484,
"step": 1345
},
{
"epoch": 1.17911751856706,
"grad_norm": 1.3741153478622437,
"learning_rate": 0.00017411183792036822,
"loss": 0.2475,
"step": 1350
},
{
"epoch": 1.18348623853211,
"grad_norm": 1.8292722702026367,
"learning_rate": 0.0001739270276095844,
"loss": 0.2898,
"step": 1355
},
{
"epoch": 1.1878549584971603,
"grad_norm": 1.854209065437317,
"learning_rate": 0.00017374165881541717,
"loss": 0.3992,
"step": 1360
},
{
"epoch": 1.1922236784622107,
"grad_norm": 1.6196980476379395,
"learning_rate": 0.0001735557329382393,
"loss": 0.3053,
"step": 1365
},
{
"epoch": 1.1965923984272608,
"grad_norm": 1.6935441493988037,
"learning_rate": 0.00017336925138263195,
"loss": 0.239,
"step": 1370
},
{
"epoch": 1.200961118392311,
"grad_norm": 1.7809889316558838,
"learning_rate": 0.00017318221555737422,
"loss": 0.2152,
"step": 1375
},
{
"epoch": 1.2053298383573612,
"grad_norm": 2.3215832710266113,
"learning_rate": 0.0001729946268754324,
"loss": 0.3185,
"step": 1380
},
{
"epoch": 1.2096985583224116,
"grad_norm": 1.347947120666504,
"learning_rate": 0.00017280648675394947,
"loss": 0.3085,
"step": 1385
},
{
"epoch": 1.2140672782874617,
"grad_norm": 1.3840276002883911,
"learning_rate": 0.00017261779661423407,
"loss": 0.2016,
"step": 1390
},
{
"epoch": 1.218435998252512,
"grad_norm": 1.2935765981674194,
"learning_rate": 0.00017242855788175015,
"loss": 0.2063,
"step": 1395
},
{
"epoch": 1.2228047182175623,
"grad_norm": 1.2883801460266113,
"learning_rate": 0.00017223877198610591,
"loss": 0.2181,
"step": 1400
},
{
"epoch": 1.2271734381826125,
"grad_norm": 1.3021811246871948,
"learning_rate": 0.00017204844036104318,
"loss": 0.2283,
"step": 1405
},
{
"epoch": 1.2315421581476627,
"grad_norm": 1.1819430589675903,
"learning_rate": 0.00017185756444442648,
"loss": 0.2652,
"step": 1410
},
{
"epoch": 1.235910878112713,
"grad_norm": 2.0612573623657227,
"learning_rate": 0.00017166614567823212,
"loss": 0.2977,
"step": 1415
},
{
"epoch": 1.2402795980777632,
"grad_norm": 3.0888679027557373,
"learning_rate": 0.00017147418550853756,
"loss": 0.2682,
"step": 1420
},
{
"epoch": 1.2446483180428134,
"grad_norm": 2.311062812805176,
"learning_rate": 0.0001712816853855101,
"loss": 0.3329,
"step": 1425
},
{
"epoch": 1.2490170380078638,
"grad_norm": 1.2064367532730103,
"learning_rate": 0.00017108864676339627,
"loss": 0.2065,
"step": 1430
},
{
"epoch": 1.253385757972914,
"grad_norm": 1.4042255878448486,
"learning_rate": 0.00017089507110051066,
"loss": 0.1738,
"step": 1435
},
{
"epoch": 1.2577544779379641,
"grad_norm": 2.3508129119873047,
"learning_rate": 0.00017070095985922493,
"loss": 0.403,
"step": 1440
},
{
"epoch": 1.2621231979030143,
"grad_norm": 1.2386358976364136,
"learning_rate": 0.0001705063145059568,
"loss": 0.272,
"step": 1445
},
{
"epoch": 1.2664919178680647,
"grad_norm": 0.806268036365509,
"learning_rate": 0.00017031113651115893,
"loss": 0.2549,
"step": 1450
},
{
"epoch": 1.2708606378331149,
"grad_norm": 1.6991655826568604,
"learning_rate": 0.00017011542734930786,
"loss": 0.2331,
"step": 1455
},
{
"epoch": 1.2752293577981653,
"grad_norm": 1.1343746185302734,
"learning_rate": 0.00016991918849889283,
"loss": 0.3112,
"step": 1460
},
{
"epoch": 1.2795980777632154,
"grad_norm": 1.3381041288375854,
"learning_rate": 0.00016972242144240463,
"loss": 0.1974,
"step": 1465
},
{
"epoch": 1.2839667977282656,
"grad_norm": 3.6427793502807617,
"learning_rate": 0.00016952512766632439,
"loss": 0.2315,
"step": 1470
},
{
"epoch": 1.2883355176933158,
"grad_norm": 1.8139313459396362,
"learning_rate": 0.0001693273086611123,
"loss": 0.1771,
"step": 1475
},
{
"epoch": 1.2927042376583662,
"grad_norm": 1.205609679222107,
"learning_rate": 0.00016912896592119654,
"loss": 0.2551,
"step": 1480
},
{
"epoch": 1.2970729576234163,
"grad_norm": 1.355162262916565,
"learning_rate": 0.00016893010094496172,
"loss": 0.2452,
"step": 1485
},
{
"epoch": 1.3014416775884665,
"grad_norm": 1.2561094760894775,
"learning_rate": 0.00016873071523473777,
"loss": 0.2163,
"step": 1490
},
{
"epoch": 1.305810397553517,
"grad_norm": 1.3165076971054077,
"learning_rate": 0.00016853081029678853,
"loss": 0.3273,
"step": 1495
},
{
"epoch": 1.310179117518567,
"grad_norm": 1.8802030086517334,
"learning_rate": 0.00016833038764130028,
"loss": 0.3797,
"step": 1500
},
{
"epoch": 1.3145478374836173,
"grad_norm": 1.7062153816223145,
"learning_rate": 0.0001681294487823704,
"loss": 0.2989,
"step": 1505
},
{
"epoch": 1.3189165574486674,
"grad_norm": 2.0729176998138428,
"learning_rate": 0.00016792799523799613,
"loss": 0.2587,
"step": 1510
},
{
"epoch": 1.3232852774137178,
"grad_norm": 1.129841685295105,
"learning_rate": 0.00016772602853006268,
"loss": 0.2201,
"step": 1515
},
{
"epoch": 1.327653997378768,
"grad_norm": 1.2515584230422974,
"learning_rate": 0.00016752355018433206,
"loss": 0.2397,
"step": 1520
},
{
"epoch": 1.3320227173438184,
"grad_norm": 1.2597646713256836,
"learning_rate": 0.0001673205617304315,
"loss": 0.2157,
"step": 1525
},
{
"epoch": 1.3363914373088686,
"grad_norm": 1.8813763856887817,
"learning_rate": 0.0001671170647018418,
"loss": 0.2765,
"step": 1530
},
{
"epoch": 1.3407601572739187,
"grad_norm": 2.208132266998291,
"learning_rate": 0.00016691306063588583,
"loss": 0.2258,
"step": 1535
},
{
"epoch": 1.345128877238969,
"grad_norm": 1.9504673480987549,
"learning_rate": 0.00016670855107371683,
"loss": 0.2779,
"step": 1540
},
{
"epoch": 1.3494975972040193,
"grad_norm": 2.171309471130371,
"learning_rate": 0.00016650353756030692,
"loss": 0.3031,
"step": 1545
},
{
"epoch": 1.3538663171690695,
"grad_norm": 2.3320510387420654,
"learning_rate": 0.00016629802164443519,
"loss": 0.3288,
"step": 1550
},
{
"epoch": 1.3582350371341196,
"grad_norm": 1.4883947372436523,
"learning_rate": 0.0001660920048786763,
"loss": 0.2416,
"step": 1555
},
{
"epoch": 1.36260375709917,
"grad_norm": 1.1198906898498535,
"learning_rate": 0.00016588548881938845,
"loss": 0.2337,
"step": 1560
},
{
"epoch": 1.3669724770642202,
"grad_norm": 1.4867557287216187,
"learning_rate": 0.0001656784750267019,
"loss": 0.3154,
"step": 1565
},
{
"epoch": 1.3713411970292704,
"grad_norm": 2.2435972690582275,
"learning_rate": 0.0001654709650645069,
"loss": 0.245,
"step": 1570
},
{
"epoch": 1.3757099169943205,
"grad_norm": 2.2508065700531006,
"learning_rate": 0.00016526296050044215,
"loss": 0.3097,
"step": 1575
},
{
"epoch": 1.380078636959371,
"grad_norm": 0.8681387901306152,
"learning_rate": 0.00016505446290588277,
"loss": 0.295,
"step": 1580
},
{
"epoch": 1.3844473569244211,
"grad_norm": 1.143965244293213,
"learning_rate": 0.00016484547385592848,
"loss": 0.2534,
"step": 1585
},
{
"epoch": 1.3888160768894715,
"grad_norm": 2.3972761631011963,
"learning_rate": 0.00016463599492939177,
"loss": 0.2527,
"step": 1590
},
{
"epoch": 1.3931847968545217,
"grad_norm": 2.7718844413757324,
"learning_rate": 0.00016442602770878586,
"loss": 0.2697,
"step": 1595
},
{
"epoch": 1.3975535168195719,
"grad_norm": 1.7743192911148071,
"learning_rate": 0.00016421557378031279,
"loss": 0.2784,
"step": 1600
}
],
"logging_steps": 5,
"max_steps": 5725,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 776126445041664.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}