zhangfz committed
Commit 7b0e642 · Parent(s): 80ac386
update
Browse files
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0001_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0001_seed_42/training_log_25bbd1f8-16e0-4420-8974-9d2327370772.txt +0 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_42/training_log_0d5098a3-838a-4da1-9c55-7cf124e00b3c.txt +0 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_43/config.json +27 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_43/training_log_54d1802f-5703-4c00-9c7a-31399abed1f8.txt +1398 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0005_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0005_seed_42/training_log_468b38e5-f77c-4ee0-bcb4-4167524b1954.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.0005_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.0005_seed_42/training_log_c9f9753c-1658-43e4-95d3-e6169bb4b689.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.005_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.005_seed_42/training_log_e57f0ab5-52cb-4681-9781-64dc94922cf1.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_5e-05_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_5e-05_seed_42/training_log_61537f41-2746-44ec-a838-26ce9c6334dc.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/training_log_bdcbf483-fd51-48ef-9dfd-70f6b4f1756b.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/training_log_be3ad22d-9271-4557-8520-677019d1ba75.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.0005_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.0005_seed_42/training_log_9fef894a-70e9-4399-b152-6eaf5fc0a83a.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.008_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.008_seed_42/training_log_16f75572-a351-445f-b63f-555db4ad8a4a.txt +0 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.005_adam_lr_0.008_seed_42/config.json +27 -0
- logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.005_adam_lr_0.008_seed_42/training_log_ed6797f0-bbfd-4e73-ba21-3ae651151e5d.txt +0 -0
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0001_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 42,
        "optimizer_mode": 5,
        "model_parameterization": "qkvo",
        "adam_lr": 0.0001,
        "muon_lr": 0.05,
        "base_dir": "logs_new_MUON_large_reshape/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "25bbd1f8-16e0-4420-8974-9d2327370772",
    "script_code_logged_at_start": true
}
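Each run directory in this sweep carries a config.json of this shape, so runs can be indexed programmatically. A minimal sketch (assuming only this commit's directory layout; keying by (adam_lr, seed) is illustrative, not part of the repo):

import glob
import json

# Index every run in the adam_lr sweep by its (adam_lr, seed) pair.
runs = {}
for path in glob.glob("logs_new_MUON_large_reshape/adam_lr_search/*/config.json"):
    with open(path) as f:
        cfg = json.load(f)
    key = (cfg["cli_args"]["adam_lr"], cfg["cli_args"]["seed"])
    runs[key] = cfg["run_uuid_for_log"]  # maps to training_log_<uuid>.txt

for (adam_lr, seed), run_uuid in sorted(runs.items()):
    print(f"adam_lr={adam_lr} seed={seed} -> training_log_{run_uuid}.txt")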
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0001_seed_42/training_log_25bbd1f8-16e0-4420-8974-9d2327370772.txt
ADDED
The diff for this file is too large to render. See raw diff.
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 42,
        "optimizer_mode": 5,
        "model_parameterization": "qkvo",
        "adam_lr": 0.0002,
        "muon_lr": 0.05,
        "base_dir": "logs_new_MUON_large_reshape/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "0d5098a3-838a-4da1-9c55-7cf124e00b3c",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_42/training_log_0d5098a3-838a-4da1-9c55-7cf124e00b3c.txt
ADDED
The diff for this file is too large to render. See raw diff.
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_43/config.json
ADDED
@@ -0,0 +1,27 @@
{
    "cli_args": {
        "seed": 43,
        "optimizer_mode": 5,
        "model_parameterization": "qkvo",
        "adam_lr": 0.0002,
        "muon_lr": 0.05,
        "base_dir": "logs_new_MUON_large_reshape/adam_lr_search"
    },
    "hyperparameters": {
        "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
        "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
        "batch_size": 960,
        "device_batch_size": 24,
        "sequence_length": 1024,
        "num_iterations": 6000,
        "learning_rate": 0.0018,
        "warmup_iters": 0,
        "warmdown_iters": 0,
        "weight_decay": 0,
        "val_loss_every": 125,
        "val_tokens": 10420224,
        "save_every": 0
    },
    "run_uuid_for_log": "54d1802f-5703-4c00-9c7a-31399abed1f8",
    "script_code_logged_at_start": true
}
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0002_seed_43/training_log_54d1802f-5703-4c00-9c7a-31399abed1f8.txt
ADDED
@@ -0,0 +1,1398 @@
import os
import sys
with open(sys.argv[0]) as f:
    code = f.read() # read the code of this file ASAP, for logging
import uuid
import time
import copy
import glob
from dataclasses import dataclass, asdict
from functools import lru_cache
from pathlib import Path
import argparse # Keep argparse for --unet and potentially --optimizer_mode
import json
import random
import numpy as np

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
import torch
torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
from torch import Tensor, nn
import torch.nn.functional as F
import torch.distributed as dist
# use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import BlockMask, flex_attention
sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
from optimizers.MUON_new_large_nes import Muon
from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
from kn_util.utils import setup_debugpy


# -----------------------------------------------------------------------------
# Seeding Function
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks


# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader

def _peek_data_shard(filename):
    # only reads the header, returns header data
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
    if header[0] != 20240520:
        print("ERROR: magic number mismatch in the data .bin file!")
        print("---> HINT: Are you passing in a correct file with --input_bin?")
        print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
        print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
        exit(1)
    assert header[1] == 1, "unsupported version"
    ntok = header[2] # number of tokens (claimed)
    return ntok # for now just return the number of tokens

def _load_data_shard(filename):
    with open(filename, "rb") as f:
        # first read the header, which is 256 int32 integers (4 bytes each)
        header = np.frombuffer(f.read(256*4), dtype=np.int32)
        assert header[0] == 20240520, "magic number mismatch in the data .bin file"
        assert header[1] == 1, "unsupported version"
        ntok = header[2] # number of tokens (claimed)
        # the rest of it are tokens, stored as uint16
        tokens = np.frombuffer(f.read(), dtype=np.uint16)
    assert len(tokens) == ntok, "number of tokens read does not match header?"
    return tokens
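# NOTE (annotation, not part of the original script): the shard layout implied by the two
# readers above is a 1024-byte header of 256 int32 values -- header[0] = 20240520 (magic),
# header[1] = 1 (version), header[2] = token count -- followed by the tokens as raw uint16.
# A matching writer, for reference only (_write_data_shard is a hypothetical helper):
#
#     def _write_data_shard(filename, tokens):
#         header = np.zeros(256, dtype=np.int32)
#         header[0] = 20240520            # magic number checked by the readers
#         header[1] = 1                   # version
#         header[2] = len(tokens)         # token count
#         with open(filename, "wb") as f:
#             f.write(header.tobytes())
#             f.write(np.asarray(tokens, dtype=np.uint16).tobytes())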
class DistributedDataLoader:
    def __init__(self, filename_pattern, B, T, process_rank, num_processes):
        self.process_rank = process_rank
        self.num_processes = num_processes
        self.B = B
        self.T = T

        # glob files that match the pattern
        self.files = sorted(glob.glob(filename_pattern))
        assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"

        # load and validate all data shards, count number of tokens in total
        ntok_total = 0
        for fname in self.files:
            shard_ntok = _peek_data_shard(fname)
            assert shard_ntok >= num_processes * B * T + 1
            ntok_total += int(shard_ntok)
        self.ntok_total = ntok_total

        # kick things off
        self.reset()

    def reset(self):
        self.current_shard = 0
        self.current_position = self.process_rank * self.B * self.T
        self.tokens = _load_data_shard(self.files[self.current_shard])

    def advance(self): # advance to next data shard
        self.current_shard = (self.current_shard + 1) % len(self.files)
        self.current_position = self.process_rank * self.B * self.T
        self.tokens = _load_data_shard(self.files[self.current_shard])

    def next_batch(self):
        B = self.B
        T = self.T
        buf = self.tokens[self.current_position : self.current_position+B*T+1]
        buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
        x = (buf[:-1]).view(B, T) # inputs
        y = (buf[1:]).view(B, T) # targets
        # advance current position and load next shard if necessary
        self.current_position += B * T * self.num_processes
        if self.current_position + (B * T * self.num_processes + 1) > len(self.tokens):
            self.advance()
        return x.cuda(), y.cuda()
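# NOTE (annotation, not part of the original script): single-process usage of the loader
# above, with illustrative values; in this script B, T, rank and world size come from the
# config and torchrun env vars further below:
#
#     loader = DistributedDataLoader("data/fineweb10B/fineweb_train_*.bin",
#                                    B=24, T=1024, process_rank=0, num_processes=1)
#     x, y = loader.next_batch()  # LongTensors of shape (24, 1024) on the GPU;
#                                 # y is x shifted by one token (next-token targets)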

# -----------------------------------------------------------------------------
# int main

@dataclass
class Hyperparameters:
    # data hyperparams
    input_bin : str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
    input_val_bin : str = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
    # optimization hyperparams
    batch_size : int = 8*120 # batch size, in sequences, across all devices
    device_batch_size : int = 24 # batch size, in sequences, per device
    sequence_length : int = 1024 # sequence length, in tokens
    num_iterations : int = 6000 # number of iterations to run
    learning_rate : float = 0.0036 / 2
    warmup_iters : int = 0
    warmdown_iters : int = 0 # number of iterations of linear warmup/warmdown for triangular or trapezoidal schedule
    weight_decay : float = 0
    # evaluation and logging hyperparams
    val_loss_every : int = 125 # every how many steps to evaluate val loss? 0 for only at the end
    val_tokens : int = 10420224 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
    save_every : int = 0 # every how many steps to save the checkpoint? 0 for only at the end
args = Hyperparameters()


# -----------------------------------------------------------------------------
# int main
# setup_debugpy(force=True)
parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
parser.add_argument("--optimizer_mode", type=int, default=0,
                    help="Defines how Muon is applied. "
                         "0: Muon(All Hidden Attn+MLP - original); "
                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
                         "3: Muon(All Attn)/Adam(MLP); "
                         "4: Muon(MLP)/Adam(All Attn); "
                         "5: All Adam (No Muon, all applicable matrices to Adam); "
                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
                    )
parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "norope", "gated"])
parser.add_argument("--adam_lr", type=float, default=0.008, help="Learning rate for Adam matrices")
parser.add_argument("--muon_lr", type=float, default=0.05, help="Learning rate for Muon matrices")
parser.add_argument("--base_dir", type=str, default="logs_new_MUON_large/test", help="Base directory for logs")
exp_args = parser.parse_args()
set_seed(exp_args.seed)
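# NOTE (annotation, not part of the original script): a launch matching the config.json
# files in this commit would look roughly like the following; the script filename and
# --nproc_per_node value are assumptions, as neither is recorded in this log:
#
#     torchrun --nproc_per_node=8 train_gpt_muon.py \
#         --optimizer_mode 5 --model_parameterization qkvo \
#         --adam_lr 0.0002 --seed 43 \
#         --base_dir logs_new_MUON_large_reshape/adam_lr_search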



# set up DDP (distributed data parallel). torchrun sets this env variable
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.

logfile = None
run_dir_path_str = None
base_log_dir = Path(exp_args.base_dir)


if master_process:
    import subprocess
    set_seed(exp_args.seed)

    # Construct folder name based on config and seed
    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_adam_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_muon_lr_{exp_args.muon_lr}_adam_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
    run_dir_path = base_log_dir / run_folder_name
    run_dir_path.mkdir(parents=True, exist_ok=True)
    run_dir_path_str = str(run_dir_path)

    run_uuid = uuid.uuid4()
    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
    print(f"Logging to: {logfile}")

    # Save configuration
    config_to_save = {
        "cli_args": vars(exp_args),
        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
        "run_uuid_for_log": str(run_uuid),
        "script_code_logged_at_start": True
    }
    config_file_path = run_dir_path / "config.json"
    with open(config_file_path, "w") as f:
        json.dump(config_to_save, f, indent=4)
    print(f"Saved configuration to: {config_file_path}")

# convenience variables
B, T = args.device_batch_size, args.sequence_length
# calculate the number of steps to take in the val loop.
print(f"args.val_tokens: {args.val_tokens}, args.batch_size: {args.batch_size}, B: {B}, T: {T}, ddp_world_size: {ddp_world_size}")
assert args.val_tokens % (B * T * ddp_world_size) == 0
val_steps = args.val_tokens // (B * T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (B * ddp_world_size) == 0
train_accumulation_steps = args.batch_size // (B * ddp_world_size)
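# NOTE (annotation, not part of the original script): worked numbers for this commit's
# config (batch_size=960, B=24, T=1024, val_tokens=10420224), assuming ddp_world_size=8:
#     val_steps                = 10420224 // (24 * 1024 * 8) = 53
#     train_accumulation_steps = 960 // (24 * 8)             = 5
# so each optimizer step consumes 960 * 1024 = 983,040 tokens of training data.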

# load tokens
train_loader = DistributedDataLoader(args.input_bin, B, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, B, T, ddp_rank, ddp_world_size)
if master_process:
    print(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
    print(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
x, y = train_loader.next_batch()

# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304


if exp_args.model_parameterization == "qkvo":
    from models.nano_GPT_qkvo_large import GPT, GPTConfig
    # model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=25, n_head=12, n_embd=1536))
    model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=36, n_head=20, n_embd=1280))
elif exp_args.model_parameterization == "gated":
    from models.nano_GPT_gated_large import GPT, GPTConfig
    model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=19, n_head=12, n_embd=1536))



if master_process:
    print(sum(p.numel() for p in model.parameters()))
model = model.cuda()
if hasattr(config, "coordinate_descent_tuning"):
    config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
ctx = torch.amp.autocast(device_type='cuda', dtype=torch.bfloat16)

# for name, param in raw_model.named_parameters():
#     print(name, param.shape)

if exp_args.model_parameterization == "qkvo":
    print("PRINT: Collecting parameters for optimizers...")
    head_params = [raw_model.lm_head.weight]
    # embed_params = [raw_model.transformer.wte.weight]

    # Granular collection for attention and MLP parts
    attn_q_params = []
    attn_k_params = []
    attn_v_params = []
    attn_o_params = [] # W_O from c_proj
    mlp_fc_params = []
    mlp_proj_params = []

    for block_module in raw_model.transformer.h:
        if block_module.attn is not None:
            # These attributes (c_q, c_k, c_v) MUST exist in your CausalSelfAttention class
            if hasattr(block_module.attn, 'c_q'): attn_q_params.append(block_module.attn.c_q.weight)
            else:
                print(f"PRINT: Warning: c_q not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_k'): attn_k_params.append(block_module.attn.c_k.weight)
            else: print(f"PRINT: Warning: c_k not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_v'): attn_v_params.append(block_module.attn.c_v.weight)
            else: print(f"PRINT: Warning: c_v not found in attn module of a block.")
            attn_o_params.append(block_module.attn.c_proj.weight)
        if block_module.mlp is not None:
            mlp_fc_params.append(block_module.mlp.c_fc.weight)
            mlp_proj_params.append(block_module.mlp.c_proj.weight)

    # Combine into logical groups for experiments
    attn_qk_group = attn_q_params + attn_k_params
    attn_vo_group = attn_v_params + attn_o_params
    all_attn_matrices = attn_qk_group + attn_vo_group
    mlp_w1_group = mlp_fc_params
    mlp_w2_group = mlp_proj_params
    all_mlp_matrices = mlp_fc_params + mlp_proj_params

    # Scalar parameters (all others not explicitly grouped as matrices)
    # matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
    matrix_params_for_scalar_check = set(head_params + all_attn_matrices + all_mlp_matrices)
    scalar_params = [p for n, p in raw_model.named_parameters() if p not in matrix_params_for_scalar_check]
    for p_scalar in scalar_params: # Sanity check
        if p_scalar.ndim >= 2:
            print(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.")


    # Determine parameter distribution based on optimizer_mode
    muon_params_target_list = []
    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)

    current_optimizer_mode = exp_args.optimizer_mode

    print(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}")

    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
        print(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.")
        muon_params_target_list = all_attn_matrices + all_mlp_matrices
        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
        print(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_qk_group
        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
        print(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group
        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
        print(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_attn_matrices
        adam_matrix_target_list = all_mlp_matrices
    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
        print(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_mlp_matrices
        adam_matrix_target_list = all_attn_matrices
    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
        print(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = []
        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
        print(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w2_group
        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
        print(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + all_mlp_matrices
        adam_matrix_target_list = attn_qk_group
    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
        print(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + mlp_w1_group
    elif current_optimizer_mode == 9: # Muon on V Attn, MLP
        print(f"PRINT: Mode 9: Muon on V Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params + all_mlp_matrices
        adam_matrix_target_list = attn_o_params + attn_qk_group
    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
        print(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + all_mlp_matrices
        adam_matrix_target_list = attn_v_params + attn_qk_group
    elif current_optimizer_mode == 11: # Muon on W_1, Adam on O Attn, QK Attn
        print(f"PRINT: Mode 11: Muon on W_1. Adam on O Attn, QK Attn (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w1_group
        adam_matrix_target_list = all_attn_matrices + mlp_w2_group
    elif current_optimizer_mode == 12: # Muon on W_1, VO, Adam on others
        print(f"PRINT: Mode 12: Muon on VO Attn, W_1 MLP. Adam on QK Attn, W_2 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w1_group
        adam_matrix_target_list = attn_qk_group + mlp_w2_group
    elif current_optimizer_mode == 13:
        print(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
    elif current_optimizer_mode == 14:
        print(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_o_params
        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
    elif current_optimizer_mode == 15:
        print(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_v_params
        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
    else:
        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
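    # NOTE (annotation, not part of the original script): the CLI help above documents
    # modes 0-8; modes 9-15 exist only in this "qkvo" branch and split attention even more
    # finely (V vs O projections, W_1 vs W_2 of the MLP). The "gated" branch further below
    # dispatches on the same flag but supports modes 0-8 only.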

    # Adam optimizer setup
    adam_param_groups_config = [
        dict(params=head_params, lr=adam_matrix_lr),
        # dict(params=embed_params, lr=adam_matrix_lr),
        dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
    ]
    # Add matrices specifically assigned to Adam for this experiment mode
    if adam_matrix_target_list:
        # Ensure adam_matrix_target_list is flat and contains Parameters
        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
        if flat_adam_matrices: # Only add group if there are params
            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
    print(f"PRINT: The length of Adam param groups config: {len(adam_param_groups_config)}")
    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.9, 0.95), eps=1e-10, fused=True)
    optimizers = [optimizer1] # Start with Adam

    # Muon optimizer setup
    # if muon_params_target_list:
    #     # Ensure muon_params_target_list is flat, unique, and contains Parameters
    #     flat_unique_muon_params = []
    #     seen_muon_ids = set()
    #     for sublist_or_p in muon_params_target_list:
    #         for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
    #             if p is not None and id(p) not in seen_muon_ids:
    #                 flat_unique_muon_params.append(p)
    #                 seen_muon_ids.add(id(p))
    #
    #     muon_param_groups_config = []
    #     if flat_unique_muon_params:
    #         muon_param_groups_config.append(dict(params=flat_unique_muon_params, lr=exp_args.muon_lr))
    #
    #     if flat_unique_muon_params: # Only create Muon if it has parameters
    #         optimizer2 = Muon(muon_param_groups_config, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
    #         optimizers.append(optimizer2)
    #     else:
    #         print("PRINT: Muon optimizer not created as its target parameter list was empty.")
    #         optimizer2 = None # Explicitly set to None if not created
    # else:
    #     print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
    #     optimizer2 = None # Explicitly set to None
    # Muon optimizer setup
    if muon_params_target_list:
        # Ensure muon_params_target_list is flat, unique, and contains Parameters
        flat_unique_muon_params = []
        seen_muon_ids = set()
        for sublist_or_p in muon_params_target_list:
            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                if p is not None and id(p) not in seen_muon_ids:
                    flat_unique_muon_params.append(p)
                    seen_muon_ids.add(id(p))

        if flat_unique_muon_params: # Only create Muon if it has parameters
            optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
            optimizers.append(optimizer2)
        else:
            print("PRINT: Muon optimizer not created as its target parameter list was empty.")
            optimizer2 = None # Explicitly set to None if not created
    else:
        print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
        optimizer2 = None # Explicitly set to None

    print(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}")
    if optimizer2:
        print(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.")
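# NOTE (annotation, hedged; the imported module is not shown in this log): the Muon
# optimizer used above (optimizers.MUON_new_large_nes) is presumably a variant of the
# modded-nanogpt Muon recipe: SGD-momentum on each 2-D weight matrix, with the buffered
# update orthogonalized by a few Newton-Schulz iterations before being applied. That is
# why only matrix parameters are routed to it, while the lm_head and scalar parameters
# always stay with Adam; the exact ns_steps/nesterov settings live in the imported module.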
elif exp_args.model_parameterization == "gated":
    print("PRINT: Collecting parameters for optimizers...")
    head_params = [raw_model.lm_head.weight]
    # embed_params = [raw_model.transformer.wte.weight]

    # Granular collection for attention and MLP parts
    attn_q_params = []
    attn_k_params = []
    attn_v_params = []
    attn_o_params = [] # W_O from c_proj
    mlp_fc_params = []
    mlp_proj_params = []
    mlp_up_params = []

    for block_module in raw_model.transformer.h:
        if block_module.attn is not None:
            # These attributes (c_q, c_k, c_v) MUST exist in your CausalSelfAttention class
            if hasattr(block_module.attn, 'c_q'): attn_q_params.append(block_module.attn.c_q.weight)
            else:
                print(f"PRINT: Warning: c_q not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_k'): attn_k_params.append(block_module.attn.c_k.weight)
            else: print(f"PRINT: Warning: c_k not found in attn module of a block.")
            if hasattr(block_module.attn, 'c_v'): attn_v_params.append(block_module.attn.c_v.weight)
            else: print(f"PRINT: Warning: c_v not found in attn module of a block.")
            attn_o_params.append(block_module.attn.c_proj.weight)
        if block_module.mlp is not None:
            mlp_fc_params.append(block_module.mlp.c_fc.weight)
            mlp_proj_params.append(block_module.mlp.c_proj.weight)
            mlp_up_params.append(block_module.mlp.c_up.weight)

    # Combine into logical groups for experiments
    attn_qk_group = attn_q_params + attn_k_params
    attn_vo_group = attn_v_params + attn_o_params
    all_attn_matrices = attn_qk_group + attn_vo_group
    mlp_w1_group = mlp_fc_params + mlp_up_params
    mlp_w2_group = mlp_proj_params
    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params

    # Scalar parameters (all others not explicitly grouped as matrices)
    # matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
    matrix_params_for_scalar_check = set(head_params + all_attn_matrices + all_mlp_matrices)
    scalar_params = [p for n, p in raw_model.named_parameters() if p not in matrix_params_for_scalar_check]
    for p_scalar in scalar_params: # Sanity check
        if p_scalar.ndim >= 2:
            print(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.")


    # Determine parameter distribution based on optimizer_mode
    muon_params_target_list = []
    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)

    current_optimizer_mode = exp_args.optimizer_mode

    print(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}")

    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
        print(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.")
        muon_params_target_list = all_attn_matrices + all_mlp_matrices
        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
        print(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_qk_group
        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
        print(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group
        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
        print(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_attn_matrices
        adam_matrix_target_list = all_mlp_matrices
    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
        print(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = all_mlp_matrices
        adam_matrix_target_list = all_attn_matrices
    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
        print(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = []
        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
        print(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = mlp_w2_group
        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
        print(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + all_mlp_matrices
        adam_matrix_target_list = attn_qk_group
    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
        print(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).")
        muon_params_target_list = attn_vo_group + mlp_w2_group
        adam_matrix_target_list = attn_qk_group + mlp_w1_group
    else:
        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

    # Adam optimizer setup
    adam_param_groups_config = [
        dict(params=head_params, lr=adam_matrix_lr),
        # dict(params=embed_params, lr=adam_matrix_lr),
        dict(params=scalar_params, lr=adam_matrix_lr) # Scalar params always go to Adam
    ]

    # Add matrices specifically assigned to Adam for this experiment mode
    if adam_matrix_target_list:
        # Ensure adam_matrix_target_list is flat and contains Parameters
        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
        if flat_adam_matrices: # Only add group if there are params
            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
    # print(f"PRINT: The length of Adam param groups config: {len(adam_param_groups_config)}")
    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.9, 0.95), eps=1e-10, fused=True)
    optimizers = [optimizer1] # Start with Adam


    if muon_params_target_list:
        # Ensure muon_params_target_list is flat, unique, and contains Parameters
        flat_unique_muon_params = []
        seen_muon_ids = set()
        for sublist_or_p in muon_params_target_list:
            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                if p is not None and id(p) not in seen_muon_ids:
                    flat_unique_muon_params.append(p)
                    seen_muon_ids.add(id(p))

        if flat_unique_muon_params: # Only create Muon if it has parameters
            optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, rank=ddp_rank, world_size=ddp_world_size) # Pass nesterov, ns_steps
            optimizers.append(optimizer2)
        else:
            print("PRINT: Muon optimizer not created as its target parameter list was empty.")
            optimizer2 = None # Explicitly set to None if not created
    else:
        print("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).")
        optimizer2 = None # Explicitly set to None

    print(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}")
    if optimizer2:
        print(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.")

# optimizer1 = torch.optim.AdamW(raw_model.lm_head.parameters(), lr=args.learning_rate, betas=(0.9, 0.95),
#                                weight_decay=args.weight_decay, fused=True)
# optimizer2 = Muon(raw_model.transformer.h.parameters(), lr=0.1*args.learning_rate, momentum=0.95,
#                   rank=ddp_rank, world_size=ddp_world_size)

# optimizers = [optimizer1, optimizer2]
# learning rate decay scheduler (linear warmup and warmdown)
def get_lr(it):
    assert it <= args.num_iterations
    # 1) linear warmup for warmup_iters steps
    if it < args.warmup_iters:
        return (it+1) / args.warmup_iters
    # 2) constant lr for a while
    elif it < args.num_iterations - args.warmdown_iters:
        return 1.0
    # 3) linear warmdown
    else:
        decay_ratio = (args.num_iterations - it) / args.warmdown_iters
        return decay_ratio
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
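# NOTE (annotation, not part of the original script): get_lr returns a multiplier on each
# param group's base lr, so the schedule is trapezoidal in general (linear warmup, flat
# plateau, linear warmdown). For the runs in this commit warmup_iters = warmdown_iters = 0,
# so the multiplier is 1.0 for all 6000 iterations, i.e. a constant learning rate.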

if master_process:
    with open(logfile, "a") as f:
        f.write(code)

training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
train_loader.reset()
for step in range(args.num_iterations + 1):
    last_step = (step == args.num_iterations)
    # This effectively ignores timing first 10 steps, which are slower for weird reasons.
    # Alternately, and slightly more correctly in terms of benchmarking, we could do 10
    # steps with dummy data first, and then re-initialize the model and reset the loader.
    if step == 10:
        training_time_ms = 0
        t0 = time.time()
    timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val

    # once in a while evaluate the validation dataset
    if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # run validation batches
        with torch.no_grad():
            val_loader.reset()
            val_loss = 0.0
            for _ in range(val_steps):
                x_val, y_val = val_loader.next_batch()
                with ctx: # of course, we'd like to use no_grad() here too, but that creates a torch.compile error for some reason
                    _, loss = model(x_val, y_val, return_logits=False)
                    val_loss += loss.detach()
                    del loss
            dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
            val_loss /= val_steps
        # log val loss to console and to logfile
        if master_process:
            print(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
            with open(logfile, "a") as f:
                f.write(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms\n')
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
        # stop the clock
        torch.cuda.synchronize()
        training_time_ms += 1000 * (time.time() - t0)
        # save the state of the training process into this run's directory
        log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
        torch.save(log, '%s/state_step%06d.pt' % (run_dir_path_str, step))
        # start the clock again
        torch.cuda.synchronize()
        t0 = time.time()

    # bit confusing: we want to make sure to eval on 0th iteration
    # but also after the very last iteration. so we loop for step <= num_iterations
    # instead of just < num_iterations (one extra due to <=), only to do
    # the validation/sampling one last time, and then we break right here as we're done.
    if last_step:
        break

    # --------------- TRAINING SECTION BEGIN -----------------
    model.train()
    for i in range(1, train_accumulation_steps+1):
        # forward pass
        with ctx:
            _, loss = model(x, y, return_logits=False)
            train_loss = loss.detach()
        # advance the dataset for the next batch
        x, y = train_loader.next_batch()
        # backward pass
        if i < train_accumulation_steps:
            with model.no_sync(): # there's no need to sync gradients every accumulation step
                loss.backward()
        else:
            loss.backward() # just sync on the last step
    for p in model.parameters():
        p.grad /= train_accumulation_steps
    # step the optimizers and schedulers
    for opt, sched in zip(optimizers, schedulers):
        opt.step()
        sched.step()
    # null the gradients
    model.zero_grad(set_to_none=True)
    # --------------- TRAINING SECTION END -------------------
    # everything that follows now is just diagnostics, prints, logging, etc.
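    # NOTE (annotation, not part of the original script): with DDP, model.no_sync() defers
    # the gradient all-reduce, so the first train_accumulation_steps-1 backward passes only
    # accumulate gradients locally; the final backward (outside no_sync) triggers a single
    # all-reduce across ranks. Dividing every grad by train_accumulation_steps afterwards
    # turns the accumulated sum into the mean over the full global batch of batch_size
    # sequences before the optimizers step.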
| 699 |
+
|
| 700 |
+
#dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
|
| 701 |
+
if master_process:
|
| 702 |
+
approx_time = training_time_ms + 1000 * (time.time() - t0)
|
| 703 |
+
print(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
|
| 704 |
+
with open(logfile, "a") as f:
|
| 705 |
+
f.write(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms\n")
|
| 706 |
+
|
| 707 |
+
if master_process:
|
| 708 |
+
print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")step:0/6000 val_loss:20.6064 train_time:150ms step_avg:nanms
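A note on the timing fields in the log that follows: the first dozen lines report step_avg:nanms, and train_time drops from 73575ms at step 10 to 3418ms at step 11, i.e. the script excludes the earliest steps (torch.compile and warmup) from its timing. Below is a minimal sketch of bookkeeping that reproduces this pattern; the exact timed_steps expression and the step == 10 clock reset are assumptions inferred from the log values, since that part of the script lies outside this excerpt.

import time

num_iterations = 6000  # matches the 6000-step runs in this commit
training_time_ms = 0.0
t0 = time.time()
for step in range(num_iterations + 1):
    # ... one training step would run here ...
    if step == 10:
        # assumed: reset the clock once compile/warmup is done,
        # which would explain the train_time drop at step 11
        training_time_ms = 0.0
        t0 = time.time()
    # assumed: nan until enough timed steps exist; nan renders as "step_avg:nanms"
    timed_steps = float("nan") if step <= 11 else (step - 10) + 1
    approx_time = training_time_ms + 1000 * (time.time() - t0)
    step_avg = approx_time / timed_steps  # e.g. 10599ms / 3 = 3533.14ms at step 13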
step:0/6000 val_loss:20.6064 train_time:150ms step_avg:nanms
step:1/6000 train_loss:20.6139 train_time:41365ms step_avg:nanms
step:2/6000 train_loss:9.5932 train_time:44985ms step_avg:nanms
step:3/6000 train_loss:9.4262 train_time:48551ms step_avg:nanms
step:4/6000 train_loss:9.1741 train_time:52124ms step_avg:nanms
step:5/6000 train_loss:8.8712 train_time:55697ms step_avg:nanms
step:6/6000 train_loss:8.7882 train_time:59271ms step_avg:nanms
step:7/6000 train_loss:8.3167 train_time:62844ms step_avg:nanms
step:8/6000 train_loss:8.4007 train_time:66421ms step_avg:nanms
step:9/6000 train_loss:8.1648 train_time:69996ms step_avg:nanms
step:10/6000 train_loss:8.1921 train_time:73575ms step_avg:nanms
step:11/6000 train_loss:8.0578 train_time:3418ms step_avg:nanms
step:12/6000 train_loss:7.8729 train_time:7010ms step_avg:nanms
step:13/6000 train_loss:8.1569 train_time:10599ms step_avg:3533.14ms
step:14/6000 train_loss:7.7494 train_time:14186ms step_avg:3546.44ms
step:15/6000 train_loss:7.7146 train_time:17770ms step_avg:3554.04ms
step:16/6000 train_loss:7.7069 train_time:21355ms step_avg:3559.16ms
step:17/6000 train_loss:7.4584 train_time:24940ms step_avg:3562.85ms
step:18/6000 train_loss:7.7632 train_time:28528ms step_avg:3565.94ms
step:19/6000 train_loss:7.6637 train_time:32117ms step_avg:3568.56ms
step:20/6000 train_loss:7.4008 train_time:35709ms step_avg:3570.89ms
step:21/6000 train_loss:7.6641 train_time:39304ms step_avg:3573.07ms
step:22/6000 train_loss:7.8608 train_time:42902ms step_avg:3575.21ms
step:23/6000 train_loss:7.8604 train_time:46505ms step_avg:3577.32ms
step:24/6000 train_loss:7.5770 train_time:50111ms step_avg:3579.39ms
step:25/6000 train_loss:7.7302 train_time:53720ms step_avg:3581.37ms
step:26/6000 train_loss:7.3164 train_time:57331ms step_avg:3583.17ms
step:27/6000 train_loss:7.3491 train_time:60941ms step_avg:3584.77ms
step:28/6000 train_loss:7.2141 train_time:64553ms step_avg:3586.30ms
step:29/6000 train_loss:7.7029 train_time:68168ms step_avg:3587.79ms
step:30/6000 train_loss:7.4073 train_time:71782ms step_avg:3589.09ms
step:31/6000 train_loss:7.5908 train_time:75396ms step_avg:3590.26ms
step:32/6000 train_loss:7.5080 train_time:79011ms step_avg:3591.40ms
step:33/6000 train_loss:7.4122 train_time:82624ms step_avg:3592.34ms
step:34/6000 train_loss:9.4005 train_time:86237ms step_avg:3593.19ms
step:35/6000 train_loss:7.4707 train_time:89853ms step_avg:3594.13ms
step:36/6000 train_loss:7.5233 train_time:93466ms step_avg:3594.85ms
step:37/6000 train_loss:7.8468 train_time:97081ms step_avg:3595.58ms
step:38/6000 train_loss:7.2053 train_time:100696ms step_avg:3596.29ms
step:39/6000 train_loss:7.4409 train_time:104313ms step_avg:3597.00ms
step:40/6000 train_loss:7.3196 train_time:107931ms step_avg:3597.71ms
step:41/6000 train_loss:8.3426 train_time:111548ms step_avg:3598.31ms
step:42/6000 train_loss:7.2772 train_time:115169ms step_avg:3599.04ms
step:43/6000 train_loss:7.1849 train_time:118788ms step_avg:3599.63ms
step:44/6000 train_loss:7.2862 train_time:122406ms step_avg:3600.19ms
step:45/6000 train_loss:7.9530 train_time:126027ms step_avg:3600.78ms
step:46/6000 train_loss:7.1989 train_time:129648ms step_avg:3601.34ms
step:47/6000 train_loss:7.0328 train_time:133269ms step_avg:3601.86ms
step:48/6000 train_loss:7.1789 train_time:136890ms step_avg:3602.37ms
step:49/6000 train_loss:6.9812 train_time:140511ms step_avg:3602.85ms
step:50/6000 train_loss:7.1573 train_time:144136ms step_avg:3603.40ms
step:51/6000 train_loss:7.1527 train_time:147759ms step_avg:3603.87ms
step:52/6000 train_loss:7.0693 train_time:151379ms step_avg:3604.25ms
step:53/6000 train_loss:6.8792 train_time:155002ms step_avg:3604.70ms
step:54/6000 train_loss:7.1156 train_time:158623ms step_avg:3605.07ms
step:55/6000 train_loss:7.0874 train_time:162243ms step_avg:3605.40ms
step:56/6000 train_loss:7.1595 train_time:165868ms step_avg:3605.82ms
step:57/6000 train_loss:6.7552 train_time:169493ms step_avg:3606.24ms
step:58/6000 train_loss:7.0669 train_time:173122ms step_avg:3606.71ms
step:59/6000 train_loss:6.8407 train_time:176751ms step_avg:3607.16ms
step:60/6000 train_loss:6.9346 train_time:180377ms step_avg:3607.55ms
step:61/6000 train_loss:6.9461 train_time:184000ms step_avg:3607.85ms
step:62/6000 train_loss:7.1841 train_time:187653ms step_avg:3608.71ms
step:63/6000 train_loss:7.0015 train_time:191280ms step_avg:3609.05ms
step:64/6000 train_loss:6.8388 train_time:194905ms step_avg:3609.36ms
step:65/6000 train_loss:7.0523 train_time:198534ms step_avg:3609.72ms
step:66/6000 train_loss:7.1926 train_time:202164ms step_avg:3610.07ms
step:67/6000 train_loss:6.8519 train_time:205792ms step_avg:3610.38ms
step:68/6000 train_loss:7.0683 train_time:209421ms step_avg:3610.71ms
step:69/6000 train_loss:6.9058 train_time:213049ms step_avg:3611.00ms
step:70/6000 train_loss:6.9991 train_time:216682ms step_avg:3611.36ms
step:71/6000 train_loss:6.7612 train_time:220313ms step_avg:3611.69ms
step:72/6000 train_loss:6.5975 train_time:223946ms step_avg:3612.03ms
step:73/6000 train_loss:7.1052 train_time:227579ms step_avg:3612.36ms
step:74/6000 train_loss:6.7722 train_time:231210ms step_avg:3612.65ms
step:75/6000 train_loss:6.6742 train_time:234840ms step_avg:3612.92ms
step:76/6000 train_loss:7.0480 train_time:238470ms step_avg:3613.19ms
step:77/6000 train_loss:6.8547 train_time:242107ms step_avg:3613.54ms
step:78/6000 train_loss:6.8470 train_time:245737ms step_avg:3613.78ms
step:79/6000 train_loss:6.7268 train_time:249371ms step_avg:3614.07ms
step:80/6000 train_loss:7.1553 train_time:253006ms step_avg:3614.36ms
step:81/6000 train_loss:6.3984 train_time:256636ms step_avg:3614.59ms
step:82/6000 train_loss:6.8663 train_time:260266ms step_avg:3614.81ms
step:83/6000 train_loss:6.7050 train_time:263897ms step_avg:3615.03ms
step:84/6000 train_loss:6.7786 train_time:267531ms step_avg:3615.29ms
step:85/6000 train_loss:6.6998 train_time:271165ms step_avg:3615.53ms
step:86/6000 train_loss:6.6628 train_time:274797ms step_avg:3615.75ms
step:87/6000 train_loss:6.8013 train_time:278429ms step_avg:3615.96ms
step:88/6000 train_loss:6.7195 train_time:282065ms step_avg:3616.22ms
step:89/6000 train_loss:6.6534 train_time:285698ms step_avg:3616.43ms
step:90/6000 train_loss:6.3555 train_time:289332ms step_avg:3616.65ms
step:91/6000 train_loss:6.5996 train_time:292964ms step_avg:3616.84ms
step:92/6000 train_loss:6.7964 train_time:296597ms step_avg:3617.04ms
step:93/6000 train_loss:6.3827 train_time:300231ms step_avg:3617.24ms
step:94/6000 train_loss:6.5403 train_time:303867ms step_avg:3617.46ms
step:95/6000 train_loss:6.6460 train_time:307502ms step_avg:3617.67ms
step:96/6000 train_loss:6.6781 train_time:311137ms step_avg:3617.88ms
step:97/6000 train_loss:6.5490 train_time:314772ms step_avg:3618.07ms
step:98/6000 train_loss:6.5064 train_time:318408ms step_avg:3618.27ms
step:99/6000 train_loss:6.5437 train_time:322042ms step_avg:3618.45ms
step:100/6000 train_loss:6.6042 train_time:325677ms step_avg:3618.63ms
step:101/6000 train_loss:6.6484 train_time:329309ms step_avg:3618.78ms
step:102/6000 train_loss:6.4603 train_time:336008ms step_avg:3652.27ms
step:103/6000 train_loss:6.3463 train_time:339634ms step_avg:3651.97ms
step:104/6000 train_loss:6.6883 train_time:343258ms step_avg:3651.68ms
step:105/6000 train_loss:6.6196 train_time:346886ms step_avg:3651.43ms
step:106/6000 train_loss:6.5088 train_time:350511ms step_avg:3651.16ms
step:107/6000 train_loss:6.0478 train_time:354138ms step_avg:3650.91ms
step:108/6000 train_loss:6.9311 train_time:357768ms step_avg:3650.69ms
step:109/6000 train_loss:6.4557 train_time:361401ms step_avg:3650.52ms
step:110/6000 train_loss:6.4105 train_time:365033ms step_avg:3650.33ms
step:111/6000 train_loss:6.5856 train_time:368708ms step_avg:3650.57ms
step:112/6000 train_loss:6.5822 train_time:372343ms step_avg:3650.42ms
step:113/6000 train_loss:6.4590 train_time:375977ms step_avg:3650.26ms
step:114/6000 train_loss:6.2955 train_time:379609ms step_avg:3650.09ms
step:115/6000 train_loss:6.5445 train_time:383246ms step_avg:3649.96ms
step:116/6000 train_loss:6.3191 train_time:386879ms step_avg:3649.80ms
step:117/6000 train_loss:6.4093 train_time:390513ms step_avg:3649.65ms
step:118/6000 train_loss:6.3633 train_time:394146ms step_avg:3649.50ms
step:119/6000 train_loss:6.6081 train_time:397782ms step_avg:3649.37ms
step:120/6000 train_loss:6.4720 train_time:401421ms step_avg:3649.28ms
step:121/6000 train_loss:6.1903 train_time:405056ms step_avg:3649.15ms
step:122/6000 train_loss:6.3207 train_time:408690ms step_avg:3649.01ms
step:123/6000 train_loss:6.3879 train_time:412324ms step_avg:3648.89ms
step:124/6000 train_loss:6.4137 train_time:415959ms step_avg:3648.77ms
step:125/6000 train_loss:6.1249 train_time:419590ms step_avg:3648.61ms
step:125/6000 val_loss:6.3503 train_time:419759ms step_avg:3650.08ms
step:126/6000 train_loss:6.3516 train_time:423202ms step_avg:3648.29ms
step:127/6000 train_loss:6.3710 train_time:426824ms step_avg:3648.07ms
step:128/6000 train_loss:6.1285 train_time:430451ms step_avg:3647.89ms
step:129/6000 train_loss:7.3333 train_time:434082ms step_avg:3647.75ms
step:130/6000 train_loss:6.3944 train_time:437713ms step_avg:3647.61ms
step:131/6000 train_loss:6.2378 train_time:441347ms step_avg:3647.50ms
step:132/6000 train_loss:6.2177 train_time:444987ms step_avg:3647.44ms
step:133/6000 train_loss:6.4731 train_time:448625ms step_avg:3647.36ms
step:134/6000 train_loss:6.2464 train_time:452263ms step_avg:3647.28ms
step:135/6000 train_loss:6.1878 train_time:455899ms step_avg:3647.19ms
step:136/6000 train_loss:6.1496 train_time:459539ms step_avg:3647.13ms
step:137/6000 train_loss:6.6168 train_time:463175ms step_avg:3647.05ms
step:138/6000 train_loss:6.0787 train_time:466814ms step_avg:3646.98ms
step:139/6000 train_loss:6.2357 train_time:470450ms step_avg:3646.90ms
step:140/6000 train_loss:6.3620 train_time:474086ms step_avg:3646.82ms
step:141/6000 train_loss:6.3892 train_time:477721ms step_avg:3646.73ms
step:142/6000 train_loss:6.1125 train_time:481357ms step_avg:3646.64ms
step:143/6000 train_loss:6.4248 train_time:484992ms step_avg:3646.56ms
step:144/6000 train_loss:5.9143 train_time:488625ms step_avg:3646.46ms
step:145/6000 train_loss:6.2179 train_time:492259ms step_avg:3646.36ms
step:146/6000 train_loss:7.0175 train_time:495892ms step_avg:3646.26ms
step:147/6000 train_loss:6.1695 train_time:499525ms step_avg:3646.17ms
step:148/6000 train_loss:6.1482 train_time:503160ms step_avg:3646.09ms
step:149/6000 train_loss:6.2544 train_time:506793ms step_avg:3646.00ms
step:150/6000 train_loss:5.9509 train_time:510426ms step_avg:3645.90ms
step:151/6000 train_loss:5.9444 train_time:514058ms step_avg:3645.80ms
step:152/6000 train_loss:6.3931 train_time:517691ms step_avg:3645.71ms
step:153/6000 train_loss:6.1485 train_time:521324ms step_avg:3645.62ms
step:154/6000 train_loss:6.1083 train_time:524955ms step_avg:3645.52ms
step:155/6000 train_loss:6.1417 train_time:528589ms step_avg:3645.44ms
step:156/6000 train_loss:6.2398 train_time:532221ms step_avg:3645.35ms
step:157/6000 train_loss:5.9681 train_time:535855ms step_avg:3645.27ms
step:158/6000 train_loss:6.1106 train_time:539492ms step_avg:3645.22ms
step:159/6000 train_loss:5.9468 train_time:543125ms step_avg:3645.13ms
step:160/6000 train_loss:5.8854 train_time:546760ms step_avg:3645.07ms
step:161/6000 train_loss:6.0888 train_time:550392ms step_avg:3644.98ms
step:162/6000 train_loss:6.3009 train_time:554027ms step_avg:3644.91ms
step:163/6000 train_loss:6.0815 train_time:557659ms step_avg:3644.83ms
step:164/6000 train_loss:6.1051 train_time:561295ms step_avg:3644.77ms
step:165/6000 train_loss:6.0687 train_time:564932ms step_avg:3644.72ms
step:166/6000 train_loss:6.1320 train_time:568569ms step_avg:3644.67ms
step:167/6000 train_loss:5.8841 train_time:572202ms step_avg:3644.60ms
step:168/6000 train_loss:6.2607 train_time:575835ms step_avg:3644.53ms
step:169/6000 train_loss:6.1623 train_time:579472ms step_avg:3644.48ms
step:170/6000 train_loss:6.0316 train_time:583107ms step_avg:3644.42ms
step:171/6000 train_loss:6.1312 train_time:586746ms step_avg:3644.38ms
step:172/6000 train_loss:6.0583 train_time:590511ms step_avg:3645.13ms
step:173/6000 train_loss:6.4348 train_time:594604ms step_avg:3647.88ms
step:174/6000 train_loss:5.9688 train_time:598563ms step_avg:3649.78ms
step:175/6000 train_loss:6.1002 train_time:602392ms step_avg:3650.86ms
step:176/6000 train_loss:6.1945 train_time:606298ms step_avg:3652.40ms
step:177/6000 train_loss:5.9368 train_time:609928ms step_avg:3652.27ms
step:178/6000 train_loss:6.0119 train_time:613561ms step_avg:3652.15ms
step:179/6000 train_loss:5.9153 train_time:617193ms step_avg:3652.03ms
step:180/6000 train_loss:7.6726 train_time:620823ms step_avg:3651.90ms
step:181/6000 train_loss:6.2627 train_time:624450ms step_avg:3651.75ms
step:182/6000 train_loss:6.1363 train_time:628079ms step_avg:3651.62ms
step:183/6000 train_loss:6.3809 train_time:631711ms step_avg:3651.51ms
step:184/6000 train_loss:6.0319 train_time:635341ms step_avg:3651.39ms
step:185/6000 train_loss:5.8488 train_time:638971ms step_avg:3651.26ms
step:186/6000 train_loss:6.1014 train_time:642602ms step_avg:3651.15ms
step:187/6000 train_loss:5.7467 train_time:646230ms step_avg:3651.02ms
step:188/6000 train_loss:5.7764 train_time:649859ms step_avg:3650.89ms
step:189/6000 train_loss:6.0604 train_time:653488ms step_avg:3650.77ms
step:190/6000 train_loss:6.0465 train_time:657117ms step_avg:3650.65ms
step:191/6000 train_loss:6.0815 train_time:660747ms step_avg:3650.54ms
step:192/6000 train_loss:5.9892 train_time:664377ms step_avg:3650.42ms
step:193/6000 train_loss:5.9546 train_time:668005ms step_avg:3650.30ms
step:194/6000 train_loss:5.8600 train_time:671634ms step_avg:3650.18ms
step:195/6000 train_loss:5.7750 train_time:675259ms step_avg:3650.05ms
step:196/6000 train_loss:6.1322 train_time:678887ms step_avg:3649.93ms
step:197/6000 train_loss:6.0107 train_time:682513ms step_avg:3649.80ms
step:198/6000 train_loss:6.0312 train_time:686141ms step_avg:3649.69ms
step:199/6000 train_loss:5.9149 train_time:689772ms step_avg:3649.59ms
step:200/6000 train_loss:5.9250 train_time:693402ms step_avg:3649.48ms
step:201/6000 train_loss:6.6072 train_time:697029ms step_avg:3649.36ms
step:202/6000 train_loss:5.6575 train_time:700659ms step_avg:3649.26ms
step:203/6000 train_loss:5.8952 train_time:704289ms step_avg:3649.17ms
step:204/6000 train_loss:5.9848 train_time:708493ms step_avg:3652.03ms
step:205/6000 train_loss:5.8951 train_time:712126ms step_avg:3651.93ms
step:206/6000 train_loss:6.0283 train_time:715761ms step_avg:3651.84ms
step:207/6000 train_loss:6.0199 train_time:719393ms step_avg:3651.74ms
step:208/6000 train_loss:5.8813 train_time:723026ms step_avg:3651.65ms
step:209/6000 train_loss:5.7999 train_time:726660ms step_avg:3651.56ms
step:210/6000 train_loss:5.7682 train_time:730295ms step_avg:3651.47ms
step:211/6000 train_loss:5.8633 train_time:733930ms step_avg:3651.39ms
step:212/6000 train_loss:5.7512 train_time:737564ms step_avg:3651.31ms
step:213/6000 train_loss:6.0230 train_time:741198ms step_avg:3651.22ms
step:214/6000 train_loss:5.8911 train_time:744830ms step_avg:3651.13ms
step:215/6000 train_loss:5.7286 train_time:748464ms step_avg:3651.05ms
step:216/6000 train_loss:5.7594 train_time:752099ms step_avg:3650.97ms
step:217/6000 train_loss:5.9727 train_time:755734ms step_avg:3650.89ms
step:218/6000 train_loss:5.7936 train_time:759367ms step_avg:3650.80ms
step:219/6000 train_loss:5.7284 train_time:763002ms step_avg:3650.73ms
step:220/6000 train_loss:5.8436 train_time:766638ms step_avg:3650.65ms
step:221/6000 train_loss:5.7957 train_time:770271ms step_avg:3650.58ms
step:222/6000 train_loss:5.7195 train_time:773908ms step_avg:3650.51ms
step:223/6000 train_loss:5.7989 train_time:777546ms step_avg:3650.45ms
step:224/6000 train_loss:5.6911 train_time:781181ms step_avg:3650.38ms
step:225/6000 train_loss:5.7737 train_time:784814ms step_avg:3650.30ms
step:226/6000 train_loss:5.6200 train_time:788448ms step_avg:3650.22ms
step:227/6000 train_loss:5.8136 train_time:792084ms step_avg:3650.16ms
step:228/6000 train_loss:5.7588 train_time:795721ms step_avg:3650.10ms
step:229/6000 train_loss:5.5865 train_time:799353ms step_avg:3650.02ms
step:230/6000 train_loss:5.5816 train_time:802984ms step_avg:3649.93ms
step:231/6000 train_loss:5.6370 train_time:806619ms step_avg:3649.86ms
step:232/6000 train_loss:5.9022 train_time:810252ms step_avg:3649.78ms
step:233/6000 train_loss:5.6609 train_time:813887ms step_avg:3649.72ms
step:234/6000 train_loss:5.7494 train_time:817523ms step_avg:3649.65ms
step:235/6000 train_loss:5.7121 train_time:821158ms step_avg:3649.59ms
step:236/6000 train_loss:5.8656 train_time:824790ms step_avg:3649.51ms
step:237/6000 train_loss:5.8989 train_time:828425ms step_avg:3649.45ms
step:238/6000 train_loss:5.4669 train_time:832059ms step_avg:3649.38ms
step:239/6000 train_loss:5.8470 train_time:835692ms step_avg:3649.31ms
step:240/6000 train_loss:5.6743 train_time:839328ms step_avg:3649.25ms
step:241/6000 train_loss:5.7068 train_time:842960ms step_avg:3649.18ms
step:242/6000 train_loss:5.4761 train_time:846593ms step_avg:3649.11ms
step:243/6000 train_loss:5.5982 train_time:850225ms step_avg:3649.03ms
step:244/6000 train_loss:5.5803 train_time:853857ms step_avg:3648.96ms
step:245/6000 train_loss:5.6870 train_time:857489ms step_avg:3648.89ms
step:246/6000 train_loss:5.6025 train_time:861123ms step_avg:3648.82ms
step:247/6000 train_loss:5.3587 train_time:864757ms step_avg:3648.76ms
step:248/6000 train_loss:5.5798 train_time:868386ms step_avg:3648.68ms
step:249/6000 train_loss:5.6800 train_time:872018ms step_avg:3648.61ms
step:250/6000 train_loss:5.5458 train_time:875648ms step_avg:3648.54ms
step:250/6000 val_loss:5.6178 train_time:875817ms step_avg:3649.24ms
step:251/6000 train_loss:5.5165 train_time:879271ms step_avg:3648.43ms
step:252/6000 train_loss:5.3918 train_time:882901ms step_avg:3648.35ms
step:253/6000 train_loss:5.8173 train_time:886535ms step_avg:3648.29ms
step:254/6000 train_loss:5.5009 train_time:890167ms step_avg:3648.23ms
step:255/6000 train_loss:5.5552 train_time:893801ms step_avg:3648.17ms
step:256/6000 train_loss:5.5676 train_time:897437ms step_avg:3648.12ms
step:257/6000 train_loss:5.5240 train_time:901071ms step_avg:3648.06ms
step:258/6000 train_loss:5.4110 train_time:904707ms step_avg:3648.01ms
step:259/6000 train_loss:5.6645 train_time:908343ms step_avg:3647.96ms
step:260/6000 train_loss:5.5284 train_time:911981ms step_avg:3647.92ms
step:261/6000 train_loss:5.6656 train_time:915616ms step_avg:3647.87ms
step:262/6000 train_loss:5.6471 train_time:919253ms step_avg:3647.83ms
step:263/6000 train_loss:5.6162 train_time:922891ms step_avg:3647.79ms
step:264/6000 train_loss:5.3869 train_time:926530ms step_avg:3647.76ms
step:265/6000 train_loss:5.4900 train_time:930168ms step_avg:3647.72ms
step:266/6000 train_loss:5.6796 train_time:933808ms step_avg:3647.69ms
step:267/6000 train_loss:5.7344 train_time:937447ms step_avg:3647.65ms
step:268/6000 train_loss:7.0577 train_time:941084ms step_avg:3647.61ms
step:269/6000 train_loss:5.4364 train_time:944719ms step_avg:3647.56ms
step:270/6000 train_loss:5.5607 train_time:948356ms step_avg:3647.52ms
step:271/6000 train_loss:5.5827 train_time:951989ms step_avg:3647.47ms
step:272/6000 train_loss:5.5074 train_time:955626ms step_avg:3647.43ms
step:273/6000 train_loss:5.7344 train_time:959261ms step_avg:3647.38ms
step:274/6000 train_loss:5.6101 train_time:962895ms step_avg:3647.33ms
step:275/6000 train_loss:5.4088 train_time:966528ms step_avg:3647.28ms
step:276/6000 train_loss:5.5844 train_time:970162ms step_avg:3647.23ms
step:277/6000 train_loss:5.2656 train_time:973799ms step_avg:3647.19ms
step:278/6000 train_loss:5.4143 train_time:977437ms step_avg:3647.15ms
step:279/6000 train_loss:5.5655 train_time:981075ms step_avg:3647.12ms
step:280/6000 train_loss:5.5094 train_time:984714ms step_avg:3647.09ms
step:281/6000 train_loss:5.6088 train_time:988349ms step_avg:3647.04ms
step:282/6000 train_loss:5.3972 train_time:991985ms step_avg:3647.00ms
step:283/6000 train_loss:5.4071 train_time:995623ms step_avg:3646.97ms
step:284/6000 train_loss:5.2755 train_time:999261ms step_avg:3646.94ms
step:285/6000 train_loss:5.4246 train_time:1002900ms step_avg:3646.91ms
step:286/6000 train_loss:5.4583 train_time:1006538ms step_avg:3646.88ms
step:287/6000 train_loss:5.4784 train_time:1010175ms step_avg:3646.84ms
step:288/6000 train_loss:5.2774 train_time:1013813ms step_avg:3646.81ms
step:289/6000 train_loss:5.3768 train_time:1017451ms step_avg:3646.78ms
step:290/6000 train_loss:5.4798 train_time:1021090ms step_avg:3646.75ms
step:291/6000 train_loss:5.5539 train_time:1024726ms step_avg:3646.71ms
step:292/6000 train_loss:5.4353 train_time:1028360ms step_avg:3646.67ms
step:293/6000 train_loss:5.6088 train_time:1031998ms step_avg:3646.64ms
step:294/6000 train_loss:5.4513 train_time:1035638ms step_avg:3646.61ms
step:295/6000 train_loss:5.7613 train_time:1039272ms step_avg:3646.57ms
step:296/6000 train_loss:5.5382 train_time:1042908ms step_avg:3646.53ms
step:297/6000 train_loss:5.6979 train_time:1046542ms step_avg:3646.49ms
step:298/6000 train_loss:5.4610 train_time:1050179ms step_avg:3646.46ms
step:299/6000 train_loss:5.7934 train_time:1053817ms step_avg:3646.43ms
step:300/6000 train_loss:5.3698 train_time:1057457ms step_avg:3646.40ms
step:301/6000 train_loss:5.3514 train_time:1061091ms step_avg:3646.36ms
step:302/6000 train_loss:5.3965 train_time:1064729ms step_avg:3646.33ms
step:303/6000 train_loss:5.2803 train_time:1068366ms step_avg:3646.30ms
step:304/6000 train_loss:5.6529 train_time:1072003ms step_avg:3646.27ms
step:305/6000 train_loss:5.1238 train_time:1078684ms step_avg:3656.56ms
step:306/6000 train_loss:5.3095 train_time:1082311ms step_avg:3656.46ms
step:307/6000 train_loss:5.2862 train_time:1085939ms step_avg:3656.36ms
step:308/6000 train_loss:5.6199 train_time:1089570ms step_avg:3656.28ms
step:309/6000 train_loss:5.3509 train_time:1093200ms step_avg:3656.19ms
step:310/6000 train_loss:5.3536 train_time:1096834ms step_avg:3656.11ms
step:311/6000 train_loss:5.2718 train_time:1100466ms step_avg:3656.03ms
step:312/6000 train_loss:5.4072 train_time:1104094ms step_avg:3655.94ms
step:313/6000 train_loss:5.4180 train_time:1107727ms step_avg:3655.86ms
step:314/6000 train_loss:5.2266 train_time:1111361ms step_avg:3655.79ms
step:315/6000 train_loss:5.1666 train_time:1114993ms step_avg:3655.72ms
step:316/6000 train_loss:5.5031 train_time:1118625ms step_avg:3655.64ms
step:317/6000 train_loss:5.4267 train_time:1122257ms step_avg:3655.56ms
step:318/6000 train_loss:5.4147 train_time:1125887ms step_avg:3655.48ms
step:319/6000 train_loss:5.2924 train_time:1129515ms step_avg:3655.39ms
step:320/6000 train_loss:5.2803 train_time:1133149ms step_avg:3655.32ms
step:321/6000 train_loss:5.2647 train_time:1136778ms step_avg:3655.23ms
step:322/6000 train_loss:5.2246 train_time:1140408ms step_avg:3655.15ms
step:323/6000 train_loss:5.4791 train_time:1144038ms step_avg:3655.07ms
step:324/6000 train_loss:5.2629 train_time:1147668ms step_avg:3654.99ms
step:325/6000 train_loss:5.3213 train_time:1151298ms step_avg:3654.91ms
step:326/6000 train_loss:5.6975 train_time:1154927ms step_avg:3654.83ms
step:327/6000 train_loss:5.3158 train_time:1158555ms step_avg:3654.75ms
step:328/6000 train_loss:5.7257 train_time:1162186ms step_avg:3654.67ms
step:329/6000 train_loss:5.1292 train_time:1165816ms step_avg:3654.59ms
step:330/6000 train_loss:5.2763 train_time:1169447ms step_avg:3654.52ms
step:331/6000 train_loss:5.4406 train_time:1173079ms step_avg:3654.45ms
step:332/6000 train_loss:5.3636 train_time:1176711ms step_avg:3654.38ms
step:333/6000 train_loss:5.2195 train_time:1180341ms step_avg:3654.31ms
step:334/6000 train_loss:5.4190 train_time:1183971ms step_avg:3654.23ms
step:335/6000 train_loss:5.0259 train_time:1187604ms step_avg:3654.17ms
step:336/6000 train_loss:5.3140 train_time:1191234ms step_avg:3654.09ms
step:337/6000 train_loss:5.4605 train_time:1194868ms step_avg:3654.03ms
step:338/6000 train_loss:5.2046 train_time:1198499ms step_avg:3653.96ms
step:339/6000 train_loss:5.0618 train_time:1202130ms step_avg:3653.89ms
step:340/6000 train_loss:5.1788 train_time:1205760ms step_avg:3653.82ms
step:341/6000 train_loss:5.4024 train_time:1209388ms step_avg:3653.74ms
step:342/6000 train_loss:5.2952 train_time:1213018ms step_avg:3653.67ms
step:343/6000 train_loss:5.6976 train_time:1216648ms step_avg:3653.60ms
step:344/6000 train_loss:5.2602 train_time:1220278ms step_avg:3653.53ms
step:345/6000 train_loss:5.1481 train_time:1223904ms step_avg:3653.45ms
step:346/6000 train_loss:5.7334 train_time:1227533ms step_avg:3653.37ms
step:347/6000 train_loss:5.3615 train_time:1231164ms step_avg:3653.31ms
step:348/6000 train_loss:5.0662 train_time:1234797ms step_avg:3653.25ms
step:349/6000 train_loss:5.2285 train_time:1238430ms step_avg:3653.19ms
step:350/6000 train_loss:5.2518 train_time:1242062ms step_avg:3653.12ms
step:351/6000 train_loss:5.1926 train_time:1245695ms step_avg:3653.06ms
step:352/6000 train_loss:5.1052 train_time:1249327ms step_avg:3653.00ms
step:353/6000 train_loss:5.1129 train_time:1252957ms step_avg:3652.94ms
step:354/6000 train_loss:5.1330 train_time:1256589ms step_avg:3652.88ms
step:355/6000 train_loss:5.1074 train_time:1260221ms step_avg:3652.82ms
step:356/6000 train_loss:5.3666 train_time:1263856ms step_avg:3652.76ms
step:357/6000 train_loss:5.0848 train_time:1267488ms step_avg:3652.70ms
step:358/6000 train_loss:4.8210 train_time:1271121ms step_avg:3652.65ms
step:359/6000 train_loss:5.6146 train_time:1274753ms step_avg:3652.59ms
step:360/6000 train_loss:5.1904 train_time:1278387ms step_avg:3652.53ms
step:361/6000 train_loss:5.0990 train_time:1282019ms step_avg:3652.47ms
step:362/6000 train_loss:5.2032 train_time:1285650ms step_avg:3652.41ms
step:363/6000 train_loss:5.1398 train_time:1289281ms step_avg:3652.36ms
step:364/6000 train_loss:5.1299 train_time:1292912ms step_avg:3652.29ms
step:365/6000 train_loss:5.2104 train_time:1296546ms step_avg:3652.24ms
step:366/6000 train_loss:5.2871 train_time:1300178ms step_avg:3652.18ms
step:367/6000 train_loss:5.0060 train_time:1303809ms step_avg:3652.13ms
step:368/6000 train_loss:5.5723 train_time:1307441ms step_avg:3652.07ms
step:369/6000 train_loss:5.1601 train_time:1311073ms step_avg:3652.02ms
step:370/6000 train_loss:4.9992 train_time:1314707ms step_avg:3651.96ms
step:371/6000 train_loss:5.0609 train_time:1318341ms step_avg:3651.92ms
step:372/6000 train_loss:5.1341 train_time:1321975ms step_avg:3651.86ms
step:373/6000 train_loss:5.0960 train_time:1325607ms step_avg:3651.81ms
step:374/6000 train_loss:5.1838 train_time:1329238ms step_avg:3651.75ms
step:375/6000 train_loss:5.0907 train_time:1332873ms step_avg:3651.71ms
step:375/6000 val_loss:5.1201 train_time:1333040ms step_avg:3652.17ms
step:376/6000 train_loss:5.0016 train_time:1336494ms step_avg:3651.62ms
step:377/6000 train_loss:5.1430 train_time:1340121ms step_avg:3651.56ms
step:378/6000 train_loss:4.9709 train_time:1343749ms step_avg:3651.49ms
step:379/6000 train_loss:5.0950 train_time:1347379ms step_avg:3651.43ms
step:380/6000 train_loss:4.8594 train_time:1351007ms step_avg:3651.37ms
step:381/6000 train_loss:5.2141 train_time:1354630ms step_avg:3651.29ms
step:382/6000 train_loss:5.2358 train_time:1358257ms step_avg:3651.23ms
step:383/6000 train_loss:5.2249 train_time:1361883ms step_avg:3651.16ms
step:384/6000 train_loss:5.2974 train_time:1365511ms step_avg:3651.10ms
step:385/6000 train_loss:5.1060 train_time:1369139ms step_avg:3651.04ms
step:386/6000 train_loss:5.0367 train_time:1372768ms step_avg:3650.98ms
step:387/6000 train_loss:5.1220 train_time:1376393ms step_avg:3650.91ms
step:388/6000 train_loss:5.3479 train_time:1380023ms step_avg:3650.85ms
step:389/6000 train_loss:5.0079 train_time:1383652ms step_avg:3650.80ms
step:390/6000 train_loss:5.3030 train_time:1387280ms step_avg:3650.74ms
step:391/6000 train_loss:5.3397 train_time:1390905ms step_avg:3650.67ms
step:392/6000 train_loss:5.1100 train_time:1394530ms step_avg:3650.60ms
step:393/6000 train_loss:5.1160 train_time:1398156ms step_avg:3650.54ms
step:394/6000 train_loss:5.1212 train_time:1401784ms step_avg:3650.48ms
step:395/6000 train_loss:5.1876 train_time:1405411ms step_avg:3650.42ms
step:396/6000 train_loss:5.1296 train_time:1409037ms step_avg:3650.36ms
step:397/6000 train_loss:5.7646 train_time:1412664ms step_avg:3650.29ms
step:398/6000 train_loss:5.0229 train_time:1416291ms step_avg:3650.23ms
step:399/6000 train_loss:4.9349 train_time:1419919ms step_avg:3650.18ms
step:400/6000 train_loss:5.1571 train_time:1423549ms step_avg:3650.12ms
step:401/6000 train_loss:5.0514 train_time:1427174ms step_avg:3650.06ms
step:402/6000 train_loss:5.1039 train_time:1430799ms step_avg:3650.00ms
step:403/6000 train_loss:5.5100 train_time:1434429ms step_avg:3649.95ms
step:404/6000 train_loss:4.9590 train_time:1438055ms step_avg:3649.88ms
step:405/6000 train_loss:5.2261 train_time:1441681ms step_avg:3649.83ms
step:406/6000 train_loss:4.9467 train_time:1445307ms step_avg:3649.76ms
step:407/6000 train_loss:5.0047 train_time:1452009ms step_avg:3657.45ms
step:408/6000 train_loss:5.1258 train_time:1455630ms step_avg:3657.36ms
step:409/6000 train_loss:5.2743 train_time:1459252ms step_avg:3657.27ms
step:410/6000 train_loss:4.9895 train_time:1462876ms step_avg:3657.19ms
step:411/6000 train_loss:4.9393 train_time:1466498ms step_avg:3657.10ms
step:412/6000 train_loss:5.2676 train_time:1470123ms step_avg:3657.02ms
step:413/6000 train_loss:4.9466 train_time:1473749ms step_avg:3656.95ms
step:414/6000 train_loss:5.0864 train_time:1477375ms step_avg:3656.87ms
step:415/6000 train_loss:4.8074 train_time:1481002ms step_avg:3656.80ms
step:416/6000 train_loss:5.1143 train_time:1484629ms step_avg:3656.72ms
step:417/6000 train_loss:5.0373 train_time:1488261ms step_avg:3656.66ms
step:418/6000 train_loss:4.9261 train_time:1491890ms step_avg:3656.59ms
step:419/6000 train_loss:4.8459 train_time:1495522ms step_avg:3656.53ms
step:420/6000 train_loss:4.4611 train_time:1499151ms step_avg:3656.47ms
step:421/6000 train_loss:5.0802 train_time:1502779ms step_avg:3656.40ms
step:422/6000 train_loss:4.9679 train_time:1506410ms step_avg:3656.33ms
step:423/6000 train_loss:4.8546 train_time:1510039ms step_avg:3656.27ms
step:424/6000 train_loss:4.9717 train_time:1513668ms step_avg:3656.20ms
step:425/6000 train_loss:4.8077 train_time:1517296ms step_avg:3656.13ms
step:426/6000 train_loss:4.9959 train_time:1520923ms step_avg:3656.06ms
step:427/6000 train_loss:5.0800 train_time:1524552ms step_avg:3656.00ms
step:428/6000 train_loss:5.0019 train_time:1528182ms step_avg:3655.94ms
step:429/6000 train_loss:5.0183 train_time:1531809ms step_avg:3655.87ms
step:430/6000 train_loss:4.0808 train_time:1535437ms step_avg:3655.80ms
step:431/6000 train_loss:4.8463 train_time:1539064ms step_avg:3655.73ms
step:432/6000 train_loss:4.8161 train_time:1542694ms step_avg:3655.67ms
step:433/6000 train_loss:5.0160 train_time:1546320ms step_avg:3655.60ms
step:434/6000 train_loss:5.0845 train_time:1549950ms step_avg:3655.54ms
step:435/6000 train_loss:4.9424 train_time:1553577ms step_avg:3655.47ms
step:436/6000 train_loss:5.0838 train_time:1557205ms step_avg:3655.41ms
step:437/6000 train_loss:5.2553 train_time:1560834ms step_avg:3655.35ms
step:438/6000 train_loss:5.0068 train_time:1564461ms step_avg:3655.28ms
step:439/6000 train_loss:5.0367 train_time:1568091ms step_avg:3655.22ms
step:440/6000 train_loss:4.9741 train_time:1571720ms step_avg:3655.16ms
step:441/6000 train_loss:4.9165 train_time:1575351ms step_avg:3655.11ms
step:442/6000 train_loss:5.3550 train_time:1578982ms step_avg:3655.05ms
step:443/6000 train_loss:5.0345 train_time:1582611ms step_avg:3654.99ms
step:444/6000 train_loss:4.9482 train_time:1586241ms step_avg:3654.93ms
step:445/6000 train_loss:4.8915 train_time:1589872ms step_avg:3654.88ms
step:446/6000 train_loss:4.9744 train_time:1593501ms step_avg:3654.82ms
step:447/6000 train_loss:4.8587 train_time:1597130ms step_avg:3654.76ms
step:448/6000 train_loss:4.9324 train_time:1600761ms step_avg:3654.71ms
step:449/6000 train_loss:4.6643 train_time:1604391ms step_avg:3654.65ms
step:450/6000 train_loss:4.9564 train_time:1608023ms step_avg:3654.60ms
step:451/6000 train_loss:4.8272 train_time:1611652ms step_avg:3654.54ms
step:452/6000 train_loss:4.9246 train_time:1615282ms step_avg:3654.48ms
step:453/6000 train_loss:4.9574 train_time:1618909ms step_avg:3654.42ms
step:454/6000 train_loss:4.7653 train_time:1622538ms step_avg:3654.36ms
step:455/6000 train_loss:4.7497 train_time:1626165ms step_avg:3654.30ms
step:456/6000 train_loss:4.7461 train_time:1629793ms step_avg:3654.25ms
step:457/6000 train_loss:4.8511 train_time:1633421ms step_avg:3654.19ms
step:458/6000 train_loss:5.0393 train_time:1637047ms step_avg:3654.12ms
step:459/6000 train_loss:4.8129 train_time:1640676ms step_avg:3654.07ms
step:460/6000 train_loss:5.0411 train_time:1644306ms step_avg:3654.01ms
step:461/6000 train_loss:4.8110 train_time:1647936ms step_avg:3653.96ms
step:462/6000 train_loss:5.0897 train_time:1651565ms step_avg:3653.90ms
step:463/6000 train_loss:4.8307 train_time:1655194ms step_avg:3653.85ms
step:464/6000 train_loss:4.8614 train_time:1658820ms step_avg:3653.79ms
step:465/6000 train_loss:4.6934 train_time:1662449ms step_avg:3653.73ms
step:466/6000 train_loss:5.0124 train_time:1666079ms step_avg:3653.68ms
step:467/6000 train_loss:4.8417 train_time:1669707ms step_avg:3653.63ms
step:468/6000 train_loss:4.9535 train_time:1673335ms step_avg:3653.57ms
step:469/6000 train_loss:4.7321 train_time:1676963ms step_avg:3653.52ms
step:470/6000 train_loss:5.0075 train_time:1680591ms step_avg:3653.46ms
step:471/6000 train_loss:4.8125 train_time:1684219ms step_avg:3653.40ms
step:472/6000 train_loss:4.7184 train_time:1687847ms step_avg:3653.35ms
step:473/6000 train_loss:5.0503 train_time:1691474ms step_avg:3653.29ms
step:474/6000 train_loss:5.7232 train_time:1695101ms step_avg:3653.23ms
step:475/6000 train_loss:4.8569 train_time:1698731ms step_avg:3653.18ms
step:476/6000 train_loss:4.7851 train_time:1702361ms step_avg:3653.14ms
step:477/6000 train_loss:4.7731 train_time:1705990ms step_avg:3653.08ms
step:478/6000 train_loss:4.2941 train_time:1709615ms step_avg:3653.02ms
step:479/6000 train_loss:4.5978 train_time:1713245ms step_avg:3652.97ms
step:480/6000 train_loss:4.7632 train_time:1716874ms step_avg:3652.92ms
step:481/6000 train_loss:4.7322 train_time:1720501ms step_avg:3652.87ms
step:482/6000 train_loss:4.7214 train_time:1724132ms step_avg:3652.82ms
step:483/6000 train_loss:5.0827 train_time:1727760ms step_avg:3652.77ms
step:484/6000 train_loss:4.8032 train_time:1731389ms step_avg:3652.72ms
step:485/6000 train_loss:4.7942 train_time:1735019ms step_avg:3652.67ms
step:486/6000 train_loss:5.1739 train_time:1738648ms step_avg:3652.62ms
step:487/6000 train_loss:4.6447 train_time:1742277ms step_avg:3652.57ms
step:488/6000 train_loss:4.8322 train_time:1745908ms step_avg:3652.53ms
step:489/6000 train_loss:4.6539 train_time:1749537ms step_avg:3652.48ms
step:490/6000 train_loss:4.8344 train_time:1753167ms step_avg:3652.43ms
step:491/6000 train_loss:4.9443 train_time:1756841ms step_avg:3652.48ms
step:492/6000 train_loss:4.6566 train_time:1760471ms step_avg:3652.43ms
step:493/6000 train_loss:4.5952 train_time:1764101ms step_avg:3652.38ms
step:494/6000 train_loss:4.6912 train_time:1767730ms step_avg:3652.33ms
step:495/6000 train_loss:4.5607 train_time:1771360ms step_avg:3652.29ms
step:496/6000 train_loss:4.9199 train_time:1774990ms step_avg:3652.24ms
step:497/6000 train_loss:5.0203 train_time:1778619ms step_avg:3652.20ms
step:498/6000 train_loss:4.8337 train_time:1782249ms step_avg:3652.15ms
step:499/6000 train_loss:4.6731 train_time:1785878ms step_avg:3652.10ms
step:500/6000 train_loss:5.3884 train_time:1789508ms step_avg:3652.06ms
step:500/6000 val_loss:4.7687 train_time:1789674ms step_avg:3652.40ms
step:501/6000 train_loss:4.4299 train_time:1793116ms step_avg:3651.97ms
step:502/6000 train_loss:4.6955 train_time:1796730ms step_avg:3651.89ms
step:503/6000 train_loss:4.7719 train_time:1800347ms step_avg:3651.82ms
step:504/6000 train_loss:4.9558 train_time:1803968ms step_avg:3651.76ms
step:505/6000 train_loss:4.7148 train_time:1807588ms step_avg:3651.69ms
step:506/6000 train_loss:5.0235 train_time:1811211ms step_avg:3651.64ms
step:507/6000 train_loss:4.7132 train_time:1814833ms step_avg:3651.58ms
step:508/6000 train_loss:4.7709 train_time:1818512ms step_avg:3651.63ms
step:509/6000 train_loss:6.0061 train_time:1825170ms step_avg:3657.65ms
step:510/6000 train_loss:4.6490 train_time:1828789ms step_avg:3657.58ms
step:511/6000 train_loss:4.7361 train_time:1832411ms step_avg:3657.51ms
step:512/6000 train_loss:4.6377 train_time:1836037ms step_avg:3657.44ms
step:513/6000 train_loss:4.9031 train_time:1839661ms step_avg:3657.38ms
step:514/6000 train_loss:4.4241 train_time:1843284ms step_avg:3657.31ms
step:515/6000 train_loss:4.6703 train_time:1846909ms step_avg:3657.24ms
step:516/6000 train_loss:4.7645 train_time:1850533ms step_avg:3657.18ms
step:517/6000 train_loss:4.6484 train_time:1854161ms step_avg:3657.12ms
step:518/6000 train_loss:4.5278 train_time:1857790ms step_avg:3657.07ms
step:519/6000 train_loss:4.7858 train_time:1861418ms step_avg:3657.01ms
step:520/6000 train_loss:4.6826 train_time:1865047ms step_avg:3656.95ms
step:521/6000 train_loss:4.5754 train_time:1868671ms step_avg:3656.89ms
step:522/6000 train_loss:4.6022 train_time:1872298ms step_avg:3656.83ms
step:523/6000 train_loss:4.7435 train_time:1875927ms step_avg:3656.78ms
step:524/6000 train_loss:5.1156 train_time:1879556ms step_avg:3656.72ms
step:525/6000 train_loss:4.7967 train_time:1883183ms step_avg:3656.67ms
step:526/6000 train_loss:4.7887 train_time:1886811ms step_avg:3656.61ms
step:527/6000 train_loss:4.8101 train_time:1890441ms step_avg:3656.56ms
step:528/6000 train_loss:4.9607 train_time:1894070ms step_avg:3656.51ms
step:529/6000 train_loss:4.7899 train_time:1897699ms step_avg:3656.45ms
step:530/6000 train_loss:4.6622 train_time:1901327ms step_avg:3656.40ms
step:531/6000 train_loss:4.5698 train_time:1904957ms step_avg:3656.35ms
step:532/6000 train_loss:4.7495 train_time:1908587ms step_avg:3656.30ms
step:533/6000 train_loss:4.5271 train_time:1912216ms step_avg:3656.24ms
step:534/6000 train_loss:4.7791 train_time:1915845ms step_avg:3656.19ms
step:535/6000 train_loss:4.6713 train_time:1919474ms step_avg:3656.14ms
step:536/6000 train_loss:4.7117 train_time:1923105ms step_avg:3656.09ms
step:537/6000 train_loss:4.5788 train_time:1926736ms step_avg:3656.05ms
step:538/6000 train_loss:4.6305 train_time:1930366ms step_avg:3656.00ms
step:539/6000 train_loss:4.5587 train_time:1933997ms step_avg:3655.95ms
step:540/6000 train_loss:5.0118 train_time:1937628ms step_avg:3655.90ms
step:541/6000 train_loss:4.0148 train_time:1941257ms step_avg:3655.85ms
step:542/6000 train_loss:4.7977 train_time:1944888ms step_avg:3655.80ms
step:543/6000 train_loss:4.6554 train_time:1948516ms step_avg:3655.75ms
step:544/6000 train_loss:4.5977 train_time:1952149ms step_avg:3655.71ms
step:545/6000 train_loss:4.5225 train_time:1955777ms step_avg:3655.66ms
step:546/6000 train_loss:4.7838 train_time:1959407ms step_avg:3655.61ms
step:547/6000 train_loss:4.7514 train_time:1963034ms step_avg:3655.56ms
step:548/6000 train_loss:4.5054 train_time:1966666ms step_avg:3655.51ms
step:549/6000 train_loss:4.4277 train_time:1970294ms step_avg:3655.46ms
step:550/6000 train_loss:4.6098 train_time:1973922ms step_avg:3655.41ms
step:551/6000 train_loss:4.6899 train_time:1977555ms step_avg:3655.37ms
step:552/6000 train_loss:4.8056 train_time:1981187ms step_avg:3655.33ms
step:553/6000 train_loss:4.5559 train_time:1984818ms step_avg:3655.28ms
step:554/6000 train_loss:4.7542 train_time:1988450ms step_avg:3655.24ms
step:555/6000 train_loss:4.6074 train_time:1992082ms step_avg:3655.20ms
step:556/6000 train_loss:4.5659 train_time:1995714ms step_avg:3655.15ms
step:557/6000 train_loss:4.6181 train_time:1999347ms step_avg:3655.11ms
step:558/6000 train_loss:4.6267 train_time:2002981ms step_avg:3655.07ms
step:559/6000 train_loss:4.6393 train_time:2006614ms step_avg:3655.04ms
step:560/6000 train_loss:4.4061 train_time:2010251ms step_avg:3655.00ms
step:561/6000 train_loss:4.6114 train_time:2013883ms step_avg:3654.96ms
step:562/6000 train_loss:4.5854 train_time:2017514ms step_avg:3654.92ms
step:563/6000 train_loss:4.5859 train_time:2021147ms step_avg:3654.88ms
step:564/6000 train_loss:4.4435 train_time:2024780ms step_avg:3654.84ms
step:565/6000 train_loss:4.4597 train_time:2028411ms step_avg:3654.80ms
step:566/6000 train_loss:4.9380 train_time:2032043ms step_avg:3654.75ms
step:567/6000 train_loss:4.5589 train_time:2035679ms step_avg:3654.72ms
step:568/6000 train_loss:4.7541 train_time:2039313ms step_avg:3654.68ms
step:569/6000 train_loss:4.4159 train_time:2042945ms step_avg:3654.64ms
step:570/6000 train_loss:4.4905 train_time:2046578ms step_avg:3654.60ms
step:571/6000 train_loss:4.2774 train_time:2050212ms step_avg:3654.57ms
step:572/6000 train_loss:4.5427 train_time:2053842ms step_avg:3654.52ms
step:573/6000 train_loss:4.7828 train_time:2057472ms step_avg:3654.48ms
step:574/6000 train_loss:4.6250 train_time:2061104ms step_avg:3654.44ms
step:575/6000 train_loss:4.3700 train_time:2064736ms step_avg:3654.40ms
step:576/6000 train_loss:4.5276 train_time:2068365ms step_avg:3654.36ms
step:577/6000 train_loss:4.5232 train_time:2071994ms step_avg:3654.31ms
step:578/6000 train_loss:4.5612 train_time:2075626ms step_avg:3654.27ms
step:579/6000 train_loss:4.4730 train_time:2079257ms step_avg:3654.23ms
step:580/6000 train_loss:4.6328 train_time:2082887ms step_avg:3654.19ms
step:581/6000 train_loss:4.5414 train_time:2086516ms step_avg:3654.14ms
step:582/6000 train_loss:4.5132 train_time:2090144ms step_avg:3654.10ms
step:583/6000 train_loss:4.4790 train_time:2093774ms step_avg:3654.06ms
step:584/6000 train_loss:4.3751 train_time:2097448ms step_avg:3654.09ms
step:585/6000 train_loss:4.5225 train_time:2101080ms step_avg:3654.05ms
step:586/6000 train_loss:4.5223 train_time:2104711ms step_avg:3654.01ms
step:587/6000 train_loss:4.6404 train_time:2108340ms step_avg:3653.97ms
step:588/6000 train_loss:4.5573 train_time:2111969ms step_avg:3653.93ms
step:589/6000 train_loss:4.3420 train_time:2115600ms step_avg:3653.89ms
step:590/6000 train_loss:4.5855 train_time:2119228ms step_avg:3653.84ms
step:591/6000 train_loss:4.7812 train_time:2122858ms step_avg:3653.80ms
step:592/6000 train_loss:4.6544 train_time:2126488ms step_avg:3653.76ms
step:593/6000 train_loss:4.3806 train_time:2130117ms step_avg:3653.72ms
step:594/6000 train_loss:4.7755 train_time:2133746ms step_avg:3653.67ms
step:595/6000 train_loss:4.7924 train_time:2137376ms step_avg:3653.64ms
step:596/6000 train_loss:4.4418 train_time:2141006ms step_avg:3653.59ms
step:597/6000 train_loss:4.6742 train_time:2144636ms step_avg:3653.55ms
step:598/6000 train_loss:4.4761 train_time:2148267ms step_avg:3653.52ms
step:599/6000 train_loss:4.5563 train_time:2151898ms step_avg:3653.48ms
step:600/6000 train_loss:4.4303 train_time:2155531ms step_avg:3653.44ms
step:601/6000 train_loss:4.4403 train_time:2159159ms step_avg:3653.40ms
step:602/6000 train_loss:4.2670 train_time:2162793ms step_avg:3653.37ms
step:603/6000 train_loss:4.3606 train_time:2166425ms step_avg:3653.33ms
step:604/6000 train_loss:4.5873 train_time:2170055ms step_avg:3653.29ms
step:605/6000 train_loss:4.7258 train_time:2173686ms step_avg:3653.25ms
step:606/6000 train_loss:4.5174 train_time:2177316ms step_avg:3653.21ms
step:607/6000 train_loss:4.4481 train_time:2180945ms step_avg:3653.17ms
step:608/6000 train_loss:4.4979 train_time:2184577ms step_avg:3653.14ms
step:609/6000 train_loss:4.1632 train_time:2188208ms step_avg:3653.10ms
step:610/6000 train_loss:4.5947 train_time:2194900ms step_avg:3658.17ms
step:611/6000 train_loss:4.5494 train_time:2198523ms step_avg:3658.11ms
step:612/6000 train_loss:4.5678 train_time:2202151ms step_avg:3658.06ms
step:613/6000 train_loss:4.4684 train_time:2205780ms step_avg:3658.01ms
step:614/6000 train_loss:4.5639 train_time:2209407ms step_avg:3657.96ms
step:615/6000 train_loss:4.3634 train_time:2213035ms step_avg:3657.91ms
step:616/6000 train_loss:4.5191 train_time:2216667ms step_avg:3657.87ms
step:617/6000 train_loss:4.4336 train_time:2220296ms step_avg:3657.82ms
step:618/6000 train_loss:4.6884 train_time:2223928ms step_avg:3657.78ms
step:619/6000 train_loss:4.5825 train_time:2227559ms step_avg:3657.73ms
step:620/6000 train_loss:4.3245 train_time:2231195ms step_avg:3657.70ms
step:621/6000 train_loss:4.4822 train_time:2234825ms step_avg:3657.65ms
step:622/6000 train_loss:4.5575 train_time:2238459ms step_avg:3657.61ms
step:623/6000 train_loss:4.5691 train_time:2242095ms step_avg:3657.58ms
step:624/6000 train_loss:4.6687 train_time:2245728ms step_avg:3657.54ms
step:625/6000 train_loss:4.6552 train_time:2249360ms step_avg:3657.50ms
step:625/6000 val_loss:4.4481 train_time:2249528ms step_avg:3657.77ms
step:626/6000 train_loss:4.3914 train_time:2252976ms step_avg:3657.43ms
step:627/6000 train_loss:4.2471 train_time:2256600ms step_avg:3657.37ms
step:628/6000 train_loss:4.4512 train_time:2260225ms step_avg:3657.32ms
step:629/6000 train_loss:4.3585 train_time:2263851ms step_avg:3657.27ms
step:630/6000 train_loss:4.3883 train_time:2267478ms step_avg:3657.22ms
step:631/6000 train_loss:4.4097 train_time:2271109ms step_avg:3657.18ms
step:632/6000 train_loss:4.4928 train_time:2274736ms step_avg:3657.13ms
step:633/6000 train_loss:4.3365 train_time:2278365ms step_avg:3657.09ms
step:634/6000 train_loss:4.3835 train_time:2281994ms step_avg:3657.04ms
step:635/6000 train_loss:4.7298 train_time:2285626ms step_avg:3657.00ms
step:636/6000 train_loss:4.2990 train_time:2289258ms step_avg:3656.96ms
step:637/6000 train_loss:4.3027 train_time:2292889ms step_avg:3656.92ms
step:638/6000 train_loss:4.3175 train_time:2296524ms step_avg:3656.89ms
step:639/6000 train_loss:4.8215 train_time:2300156ms step_avg:3656.85ms
step:640/6000 train_loss:4.3754 train_time:2303787ms step_avg:3656.80ms
step:641/6000 train_loss:4.2805 train_time:2307416ms step_avg:3656.76ms
step:642/6000 train_loss:4.6754 train_time:2311050ms step_avg:3656.72ms
step:643/6000 train_loss:4.4327 train_time:2314681ms step_avg:3656.68ms
step:644/6000 train_loss:4.5501 train_time:2318313ms step_avg:3656.65ms
step:645/6000 train_loss:4.4077 train_time:2321943ms step_avg:3656.60ms
step:646/6000 train_loss:4.4020 train_time:2325573ms step_avg:3656.56ms
step:647/6000 train_loss:4.3372 train_time:2329203ms step_avg:3656.52ms
step:648/6000 train_loss:4.2046 train_time:2332836ms step_avg:3656.48ms
step:649/6000 train_loss:4.2835 train_time:2336466ms step_avg:3656.44ms
step:650/6000 train_loss:4.2724 train_time:2340097ms step_avg:3656.40ms
step:651/6000 train_loss:4.4419 train_time:2343727ms step_avg:3656.36ms
step:652/6000 train_loss:4.4662 train_time:2347359ms step_avg:3656.32ms
step:653/6000 train_loss:4.3646 train_time:2350994ms step_avg:3656.29ms
step:654/6000 train_loss:4.4524 train_time:2354627ms step_avg:3656.25ms
step:655/6000 train_loss:4.7181 train_time:2358258ms step_avg:3656.21ms
step:656/6000 train_loss:4.2125 train_time:2361891ms step_avg:3656.18ms
step:657/6000 train_loss:4.3028 train_time:2365521ms step_avg:3656.14ms
step:658/6000 train_loss:4.6294 train_time:2369152ms step_avg:3656.10ms
step:659/6000 train_loss:4.3472 train_time:2372782ms step_avg:3656.06ms
step:660/6000 train_loss:4.3978 train_time:2376415ms step_avg:3656.02ms
step:661/6000 train_loss:4.2346 train_time:2380046ms step_avg:3655.99ms
step:662/6000 train_loss:4.0449 train_time:2383679ms step_avg:3655.95ms
step:663/6000 train_loss:4.3472 train_time:2387311ms step_avg:3655.91ms
step:664/6000 train_loss:4.2734 train_time:2390943ms step_avg:3655.88ms
step:665/6000 train_loss:4.2062 train_time:2394575ms step_avg:3655.84ms
step:666/6000 train_loss:4.2785 train_time:2398206ms step_avg:3655.80ms
step:667/6000 train_loss:4.6495 train_time:2401840ms step_avg:3655.77ms
step:668/6000 train_loss:4.2498 train_time:2405475ms step_avg:3655.74ms
step:669/6000 train_loss:4.2067 train_time:2409105ms step_avg:3655.70ms
step:670/6000 train_loss:4.6130 train_time:2412736ms step_avg:3655.66ms
step:671/6000 train_loss:4.3435 train_time:2416365ms step_avg:3655.62ms
step:672/6000 train_loss:4.3870 train_time:2419998ms step_avg:3655.59ms
step:673/6000 train_loss:4.3546 train_time:2423632ms step_avg:3655.55ms
step:674/6000 train_loss:4.0725 train_time:2427265ms step_avg:3655.52ms
step:675/6000 train_loss:4.2985 train_time:2430894ms step_avg:3655.48ms
step:676/6000 train_loss:4.3053 train_time:2434525ms step_avg:3655.44ms
step:677/6000 train_loss:4.4992 train_time:2438154ms step_avg:3655.40ms
step:678/6000 train_loss:4.2678 train_time:2441787ms step_avg:3655.37ms
step:679/6000 train_loss:4.6663 train_time:2445417ms step_avg:3655.33ms
step:680/6000 train_loss:4.2243 train_time:2449050ms step_avg:3655.30ms
step:681/6000 train_loss:4.4718 train_time:2452677ms step_avg:3655.26ms
step:682/6000 train_loss:4.3530 train_time:2456308ms step_avg:3655.22ms
step:683/6000 train_loss:4.2803 train_time:2459939ms step_avg:3655.18ms
step:684/6000 train_loss:4.5507 train_time:2463567ms step_avg:3655.14ms
step:685/6000 train_loss:4.3428 train_time:2467198ms step_avg:3655.11ms
|
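Sanity check: in every line above, step_avg equals train_time divided by (step - 10) to within rounding, which suggests the logger excludes the first 10 warmup steps from timing. A minimal sketch that re-derives this from a log line (Python; the 10-step offset is inferred from the numbers themselves, not from the training script):

import re

LINE = re.compile(
    r"step:(\d+)/\d+ (?:train_loss|val_loss):[\d.]+ "
    r"train_time:(\d+)ms step_avg:([\d.]+)ms"
)

def check_step_avg(line: str) -> None:
    # Recompute step_avg assuming the first 10 steps are untimed warmup.
    m = LINE.match(line)
    if m is None:
        return
    step, train_time, step_avg = int(m[1]), int(m[2]), float(m[3])
    derived = train_time / (step - 10)
    assert abs(derived - step_avg) < 0.01, (derived, step_avg)

check_step_avg("step:685/6000 train_loss:4.3428 train_time:2467198ms step_avg:3655.11ms")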
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0005_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 5,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005,
+    "muon_lr": 0.05,
+    "base_dir": "logs_new_MUON_large_reshape/adam_lr_search"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "468b38e5-f77c-4ee0-bcb4-4167524b1954",
+  "script_code_logged_at_start": true
+}
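For scale, the hyperparameters above pin down the token budget of each run. A back-of-envelope check (Python), assuming batch_size counts 1024-token sequences per optimizer step and device_batch_size sequences per per-device micro-batch:

batch_size, device_batch_size, seq_len = 960, 24, 1024
num_iterations = 6000

tokens_per_step = batch_size * seq_len            # 983,040 tokens per step
total_tokens = tokens_per_step * num_iterations   # ~5.9B tokens of fineweb10B
micro_batches = batch_size // device_batch_size   # 40, split across GPUs x grad accum
val_sequences = 10420224 // seq_len               # 10,176 sequences per eval

print(tokens_per_step, total_tokens, micro_batches, val_sequences)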
logs_new_MUON_large_reshape/adam_lr_search/mode_5_param_qkvo_adam_lr_0.0005_seed_42/training_log_468b38e5-f77c-4ee0-bcb4-4167524b1954.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.0005_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005,
+    "muon_lr": 0.0005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "c9f9753c-1658-43e4-95d3-e6169bb4b689",
+  "script_code_logged_at_start": true
+}
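These muon_lr_search runs sweep only the Muon learning rate at fixed adam_lr. For context, the core of Muon is an approximate orthogonalization of the momentum update; below is a minimal sketch of the quintic Newton-Schulz iteration, with coefficients as in the public modded-nanogpt implementation (whether this run's script uses the identical variant is an assumption):

import torch

def zeropower_via_newtonschulz5(G: torch.Tensor, steps: int = 5) -> torch.Tensor:
    # Approximately maps G to U V^T (the orthogonal factor of its SVD)
    # via a quintic Newton-Schulz iteration run in bfloat16.
    a, b, c = (3.4445, -4.7750, 2.0315)
    X = G.bfloat16()
    if G.size(0) > G.size(1):
        X = X.T  # iterate on the wide orientation
    X = X / (X.norm() + 1e-7)  # normalize so the spectral norm is <= 1
    for _ in range(steps):
        A = X @ X.T
        B = b * A + c * A @ A
        X = a * X + B @ X
    if G.size(0) > G.size(1):
        X = X.T
    return X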
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.0005_seed_42/training_log_c9f9753c-1658-43e4-95d3-e6169bb4b689.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.005_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005,
+    "muon_lr": 0.005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "e57f0ab5-52cb-4681-9781-64dc94922cf1",
+  "script_code_logged_at_start": true
+}
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_0.005_seed_42/training_log_e57f0ab5-52cb-4681-9781-64dc94922cf1.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_5e-05_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005,
+    "muon_lr": 5e-05,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "61537f41-2746-44ec-a838-26ce9c6334dc",
+  "script_code_logged_at_start": true
+}
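Taken together, the three muon_lr_search configs differ only in muon_lr, swept a decade apart (5e-05, 0.0005, 0.005). A sketch of how such run directories and configs could be generated (illustrative only; the actual sweep script is not part of this commit):

import json, os, uuid

base_dir = "logs_new_MUON_large_reshape/muon_lr_search"
for muon_lr in (5e-05, 0.0005, 0.005):  # log-spaced, one decade apart
    run_dir = os.path.join(base_dir, f"mode_0_param_qkvo_muon_lr_{muon_lr}_seed_42")
    os.makedirs(run_dir, exist_ok=True)
    cfg = {
        "cli_args": {
            "seed": 42,
            "optimizer_mode": 0,
            "model_parameterization": "qkvo",
            "adam_lr": 0.0005,
            "muon_lr": muon_lr,
            "base_dir": base_dir,
        },
        "run_uuid_for_log": str(uuid.uuid4()),
    }
    with open(os.path.join(run_dir, "config.json"), "w") as f:
        json.dump(cfg, f, indent=2)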
logs_new_MUON_large_reshape/muon_lr_search/mode_0_param_qkvo_muon_lr_5e-05_seed_42/training_log_61537f41-2746-44ec-a838-26ce9c6334dc.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.005,
+    "muon_lr": 0.0005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search_head"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "be3ad22d-9271-4557-8520-677019d1ba75",
+  "script_code_logged_at_start": true
+}
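In this muon_lr_search_head run the Adam rate (0.005) sits an order of magnitude above the Muon rate, consistent with the _head suffix tagging a sweep over the Adam-managed head/embedding parameters. A hypothetical parameter split of the kind Muon setups typically use (the name tests below are illustrative, not this script's actual code):

import torch

def split_params(model: torch.nn.Module):
    # Hypothetical split: Muon takes the 2D hidden weight matrices,
    # Adam takes embeddings, the lm_head, and any 1D gains/biases.
    muon_params, adam_params = [], []
    for name, p in model.named_parameters():
        if p.ndim == 2 and "embed" not in name and "lm_head" not in name:
            muon_params.append(p)
        else:
            adam_params.append(p)
    return muon_params, adam_params

# e.g. adam = torch.optim.Adam(adam_params, lr=0.005, betas=(0.9, 0.95)),
# while the Muon optimizer (defined in the training script) takes muon_params.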
logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/training_log_bdcbf483-fd51-48ef-9dfd-70f6b4f1756b.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search_head/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.005_seed_42/training_log_be3ad22d-9271-4557-8520-677019d1ba75.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.0005_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005,
+    "muon_lr": 0.0005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search_nes"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "9fef894a-70e9-4399-b152-6eaf5fc0a83a",
+  "script_code_logged_at_start": true
+}
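The _nes suffix on these muon_lr_search_nes runs plausibly marks the Nesterov-momentum variant of Muon (an assumption; the commit itself does not say). In the public Muon implementation, the Nesterov branch reuses the momentum buffer like this:

import torch

def muon_direction(grad: torch.Tensor, buf: torch.Tensor,
                   momentum: float = 0.95, nesterov: bool = True) -> torch.Tensor:
    # buf is the persistent momentum buffer for this parameter.
    buf.mul_(momentum).add_(grad)
    # Nesterov look-ahead blends the raw gradient back in; otherwise the
    # buffer itself is the direction handed to the orthogonalization step.
    return grad.add(buf, alpha=momentum) if nesterov else buf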
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.0005_seed_42/training_log_9fef894a-70e9-4399-b152-6eaf5fc0a83a.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.008_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.008,
+    "muon_lr": 0.0005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search_nes"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "16f75572-a351-445f-b63f-555db4ad8a4a",
+  "script_code_logged_at_start": true
+}
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.0005_adam_lr_0.008_seed_42/training_log_16f75572-a351-445f-b63f-555db4ad8a4a.txt
ADDED
The diff for this file is too large to render; see the raw diff.
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.005_adam_lr_0.008_seed_42/config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "cli_args": {
+    "seed": 42,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.008,
+    "muon_lr": 0.005,
+    "base_dir": "logs_new_MUON_large_reshape/muon_lr_search_nes"
+  },
+  "hyperparameters": {
+    "input_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin",
+    "input_val_bin": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin",
+    "batch_size": 960,
+    "device_batch_size": 24,
+    "sequence_length": 1024,
+    "num_iterations": 6000,
+    "learning_rate": 0.0018,
+    "warmup_iters": 0,
+    "warmdown_iters": 0,
+    "weight_decay": 0,
+    "val_loss_every": 125,
+    "val_tokens": 10420224,
+    "save_every": 0
+  },
+  "run_uuid_for_log": "ed6797f0-bbfd-4e73-ba21-3ae651151e5d",
+  "script_code_logged_at_start": true
+}
logs_new_MUON_large_reshape/muon_lr_search_nes/mode_0_param_qkvo_muon_lr_0.005_adam_lr_0.008_seed_42/training_log_ed6797f0-bbfd-4e73-ba21-3ae651151e5d.txt
ADDED
The diff for this file is too large to render; see the raw diff.