{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 7.2481944316327,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.1087,
      "step": 1
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 6.492266623182951,
      "learning_rate": 9.978029910109491e-06,
      "loss": 1.035,
      "step": 5
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.8065708875656128,
      "eval_runtime": 18.8479,
      "eval_samples_per_second": 23.769,
      "eval_steps_per_second": 0.371,
      "step": 7
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 4.2798539792652965,
      "learning_rate": 9.733078376452172e-06,
      "loss": 0.8612,
      "step": 10
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5046278834342957,
      "eval_runtime": 18.7201,
      "eval_samples_per_second": 23.931,
      "eval_steps_per_second": 0.374,
      "step": 14
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 3.8618869981058155,
      "learning_rate": 9.229160296295488e-06,
      "loss": 0.6608,
      "step": 15
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 2.5003252439116106,
      "learning_rate": 8.49384713889421e-06,
      "loss": 0.4464,
      "step": 20
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.2836165726184845,
      "eval_runtime": 18.7006,
      "eval_samples_per_second": 23.956,
      "eval_steps_per_second": 0.374,
      "step": 21
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 3.8783540297730106,
      "learning_rate": 7.56737096757421e-06,
      "loss": 0.2872,
      "step": 25
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.14440329372882843,
      "eval_runtime": 18.7131,
      "eval_samples_per_second": 23.94,
      "eval_steps_per_second": 0.374,
      "step": 28
    },
    {
      "epoch": 4.285714285714286,
      "grad_norm": 2.6373488856495295,
      "learning_rate": 6.500423175001705e-06,
      "loss": 0.1884,
      "step": 30
    },
    {
      "epoch": 5.0,
      "grad_norm": 2.524435640105693,
      "learning_rate": 5.351380944726465e-06,
      "loss": 0.1132,
      "step": 35
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.062061700969934464,
      "eval_runtime": 18.7255,
      "eval_samples_per_second": 23.925,
      "eval_steps_per_second": 0.374,
      "step": 35
    },
    {
      "epoch": 5.714285714285714,
      "grad_norm": 1.7870565823017404,
      "learning_rate": 4.183113190907349e-06,
      "loss": 0.0555,
      "step": 40
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.028033925220370293,
      "eval_runtime": 18.7711,
      "eval_samples_per_second": 23.866,
      "eval_steps_per_second": 0.373,
      "step": 42
    },
    {
      "epoch": 6.428571428571429,
      "grad_norm": 0.93279002933571,
      "learning_rate": 3.059540736638751e-06,
      "loss": 0.0371,
      "step": 45
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.017189698293805122,
      "eval_runtime": 18.7331,
      "eval_samples_per_second": 23.915,
      "eval_steps_per_second": 0.374,
      "step": 49
    },
    {
      "epoch": 7.142857142857143,
      "grad_norm": 0.6713728183393005,
      "learning_rate": 2.042138937932388e-06,
      "loss": 0.0254,
      "step": 50
    },
    {
      "epoch": 7.857142857142857,
      "grad_norm": 0.6733311496948943,
      "learning_rate": 1.186574109448091e-06,
      "loss": 0.017,
      "step": 55
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.012202958576381207,
      "eval_runtime": 18.6716,
      "eval_samples_per_second": 23.994,
      "eval_steps_per_second": 0.375,
      "step": 56
    },
    {
      "epoch": 8.571428571428571,
      "grad_norm": 0.3925640628854087,
      "learning_rate": 5.396577872130676e-07,
      "loss": 0.0126,
      "step": 60
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.0099651413038373,
      "eval_runtime": 18.7021,
      "eval_samples_per_second": 23.955,
      "eval_steps_per_second": 0.374,
      "step": 63
    },
    {
      "epoch": 9.285714285714286,
      "grad_norm": 0.30666444604194676,
      "learning_rate": 1.3678547337593494e-07,
      "loss": 0.0107,
      "step": 65
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.3132195964128779,
      "learning_rate": 0.0,
      "loss": 0.0099,
      "step": 70
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.009540632367134094,
      "eval_runtime": 18.6112,
      "eval_samples_per_second": 24.071,
      "eval_steps_per_second": 0.376,
      "step": 70
    },
    {
      "epoch": 10.0,
      "step": 70,
      "total_flos": 29313151795200.0,
      "train_loss": 0.2696564427443913,
      "train_runtime": 988.8828,
      "train_samples_per_second": 4.53,
      "train_steps_per_second": 0.071
    }
  ],
  "logging_steps": 5,
  "max_steps": 70,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 29313151795200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}