{
  "best_metric": 0.2785227596759796,
  "best_model_checkpoint": "Machine-Generated-Text-Detection/deberta/checkpoint-5988",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 11976,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08350033400133601,
      "grad_norm": 1.8728934526443481,
      "learning_rate": 1.9164996659986642e-05,
      "loss": 0.2671,
      "step": 500
    },
    {
      "epoch": 0.16700066800267202,
      "grad_norm": 0.5576282143592834,
      "learning_rate": 1.832999331997328e-05,
      "loss": 0.1527,
      "step": 1000
    },
    {
      "epoch": 0.250501002004008,
      "grad_norm": 7.88850736618042,
      "learning_rate": 1.7494989979959922e-05,
      "loss": 0.135,
      "step": 1500
    },
    {
      "epoch": 0.33400133600534404,
      "grad_norm": 0.6910138130187988,
      "learning_rate": 1.6659986639946563e-05,
      "loss": 0.1107,
      "step": 2000
    },
    {
      "epoch": 0.41750167000668004,
      "grad_norm": 8.903568267822266,
      "learning_rate": 1.58249832999332e-05,
      "loss": 0.1152,
      "step": 2500
    },
    {
      "epoch": 0.501002004008016,
      "grad_norm": 0.08548898249864578,
      "learning_rate": 1.498997995991984e-05,
      "loss": 0.1024,
      "step": 3000
    },
    {
      "epoch": 0.584502338009352,
      "grad_norm": 8.673635482788086,
      "learning_rate": 1.415497661990648e-05,
      "loss": 0.0967,
      "step": 3500
    },
    {
      "epoch": 0.6680026720106881,
      "grad_norm": 7.020376682281494,
      "learning_rate": 1.331997327989312e-05,
      "loss": 0.1024,
      "step": 4000
    },
    {
      "epoch": 0.751503006012024,
      "grad_norm": 11.58995532989502,
      "learning_rate": 1.248496993987976e-05,
      "loss": 0.0809,
      "step": 4500
    },
    {
      "epoch": 0.8350033400133601,
      "grad_norm": 8.78143310546875,
      "learning_rate": 1.16499665998664e-05,
      "loss": 0.0823,
      "step": 5000
    },
    {
      "epoch": 0.918503674014696,
      "grad_norm": 9.562505722045898,
      "learning_rate": 1.081496325985304e-05,
      "loss": 0.0769,
      "step": 5500
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.9371242484969939,
      "eval_loss": 0.2785227596759796,
      "eval_runtime": 269.2073,
      "eval_samples_per_second": 88.972,
      "eval_steps_per_second": 5.561,
      "step": 5988
    },
    {
      "epoch": 1.002004008016032,
      "grad_norm": 9.97668170928955,
      "learning_rate": 9.97995991983968e-06,
      "loss": 0.0756,
      "step": 6000
    },
    {
      "epoch": 1.085504342017368,
      "grad_norm": 0.9769249558448792,
      "learning_rate": 9.14495657982632e-06,
      "loss": 0.0652,
      "step": 6500
    },
    {
      "epoch": 1.169004676018704,
      "grad_norm": 0.14497636258602142,
      "learning_rate": 8.30995323981296e-06,
      "loss": 0.0587,
      "step": 7000
    },
    {
      "epoch": 1.25250501002004,
      "grad_norm": 0.04314618557691574,
      "learning_rate": 7.474949899799599e-06,
      "loss": 0.052,
      "step": 7500
    },
    {
      "epoch": 1.3360053440213762,
      "grad_norm": 0.008493722416460514,
      "learning_rate": 6.63994655978624e-06,
      "loss": 0.0481,
      "step": 8000
    },
    {
      "epoch": 1.4195056780227122,
      "grad_norm": 0.08650221675634384,
      "learning_rate": 5.80494321977288e-06,
      "loss": 0.0505,
      "step": 8500
    },
    {
      "epoch": 1.503006012024048,
      "grad_norm": 0.061995044350624084,
      "learning_rate": 4.969939879759519e-06,
      "loss": 0.0591,
      "step": 9000
    },
    {
      "epoch": 1.586506346025384,
      "grad_norm": 0.5668061375617981,
      "learning_rate": 4.1349365397461595e-06,
      "loss": 0.0449,
      "step": 9500
    },
    {
      "epoch": 1.6700066800267201,
      "grad_norm": 24.995769500732422,
      "learning_rate": 3.2999331997327993e-06,
      "loss": 0.0468,
      "step": 10000
    },
    {
      "epoch": 1.753507014028056,
      "grad_norm": 0.08540898561477661,
      "learning_rate": 2.464929859719439e-06,
      "loss": 0.0508,
      "step": 10500
    },
    {
      "epoch": 1.8370073480293923,
      "grad_norm": 0.008522373624145985,
      "learning_rate": 1.629926519706079e-06,
      "loss": 0.0508,
      "step": 11000
    },
    {
      "epoch": 1.920507682030728,
      "grad_norm": 0.0260649211704731,
      "learning_rate": 7.949231796927188e-07,
      "loss": 0.0369,
      "step": 11500
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.939003006012024,
      "eval_loss": 0.3057553768157959,
      "eval_runtime": 270.9202,
      "eval_samples_per_second": 88.41,
      "eval_steps_per_second": 5.526,
      "step": 11976
    }
  ],
  "logging_steps": 500,
  "max_steps": 11976,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.261573837252608e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}