Update magnum.yml
magnum.yml (+5 -5)
@@ -55,12 +55,12 @@ wandb_name: magnum-attempt-01
 wandb_log_model:

 gradient_accumulation_steps: 4
-micro_batch_size:
+micro_batch_size: 2
 num_epochs: 2
 optimizer: adamw_bnb_8bit
 lr_scheduler: cosine
-learning_rate: 0.
-weight_decay: 0.
+learning_rate: 0.00001
+weight_decay: 0.02

 train_on_inputs: false
 group_by_length: false
@@ -76,14 +76,14 @@ logging_steps: 1
 xformers_attention:
 flash_attention: true

-
+warmup_steps: 40
 evals_per_epoch: 4
 eval_table_size:
 eval_max_new_tokens: 128
 saves_per_epoch: 1

 debug:
-deepspeed:
+deepspeed: ./deepspeed_configs/zero3_bf16.json
 fsdp:
 fsdp_config:
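For context: in Axolotl-style configs like this one, the effective global batch size is gradient_accumulation_steps × micro_batch_size × number of GPUs, so the newly set micro_batch_size directly scales how many sequences each optimizer step sees. A minimal sketch of that arithmetic with the committed values, assuming an 8-GPU node (the GPU count is an assumption and is not part of this change):

# Effective global batch size implied by the updated values.
gradient_accumulation_steps = 4  # unchanged by this commit
micro_batch_size = 2             # set in this commit
num_gpus = 8                     # assumption: one 8-GPU node

effective_batch_size = gradient_accumulation_steps * micro_batch_size * num_gpus
print(effective_batch_size)  # -> 64 sequences per optimizer step

The other additions point the run at DeepSpeed ZeRO stage 3 with bf16 (./deepspeed_configs/zero3_bf16.json, which matches the DeepSpeed config files Axolotl bundles) and add a 40-step learning-rate warmup ahead of the cosine schedule.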