Update Instruct-4b.yml

Instruct-4b.yml  CHANGED  (+6 -8)
@@ -1,8 +1,8 @@
-base_model:
+base_model: NewEden/4B-PT
 model_type: AutoModelForCausalLM
 tokenizer_type: AutoTokenizer
 
-hub_model_id: NewEden/
+hub_model_id: NewEden/4B-Inst
 hub_strategy: "all_checkpoints"
 push_dataset_to_hub:
 hf_use_auth_token: true
@@ -24,8 +24,6 @@ strict: false
 datasets:
   - path: NewEden/Helpsteer-3-Filtered
     type: dan-chat-advanced
-  - path: NewEden/No_Robots-R1-Filtered
-    type: dan-chat-advanced
   - path: NewEden/GSM8K-R1-filtered
     type: dan-chat-advanced
   - path: NewEden/Hydrus-R1-Thinking-Sharegpt
@@ -52,7 +50,7 @@ dataset_prepared_path: prepared_data
 val_set_size: 0.0
 output_dir: ./4b-inst
 
-sequence_len:
+sequence_len: 16384
 sample_packing: true
 pad_to_sequence_len: true
 
@@ -63,10 +61,10 @@ wandb_watch:
 wandb_name: isnt-atmpt-1
 wandb_log_model:
 
-gradient_accumulation_steps:
-micro_batch_size:
+gradient_accumulation_steps: 2
+micro_batch_size: 1
 num_epochs: 2
-optimizer:
+optimizer: paged_adamw_8bit
 lr_scheduler: cosine
 learning_rate: 2e-5
 max_grad_norm: 0.2
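The first hunk fills in the two identifiers that were left blank: the pretrained checkpoint to fine-tune and the Hub repo to push to. Since hub_strategy is "all_checkpoints", every checkpoint saved during the run is pushed to that repo, not just the final model. A short excerpt of the updated keys (the # comments are explanatory notes added here, not part of the file):

base_model: NewEden/4B-PT        # pretrained checkpoint to fine-tune from
hub_model_id: NewEden/4B-Inst    # Hub repo that receives pushed checkpoints
hub_strategy: "all_checkpoints"  # push every saved checkpoint, not only the final one
hf_use_auth_token: true          # authenticate with the stored Hugging Face token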
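The datasets hunk drops NewEden/No_Robots-R1-Filtered from the SFT mix. Assembled from the context lines above, the surviving block reads as follows; note that the type: line for the Hydrus entry falls outside the shown hunk, so its value here is an assumption that it follows the same pattern as the other entries:

datasets:
  - path: NewEden/Helpsteer-3-Filtered
    type: dan-chat-advanced
  - path: NewEden/GSM8K-R1-filtered
    type: dan-chat-advanced
  - path: NewEden/Hydrus-R1-Thinking-Sharegpt
    type: dan-chat-advanced  # assumed; this line is outside the diff context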
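Taken together, the newly filled-in training knobs give an effective batch of micro_batch_size × gradient_accumulation_steps = 1 × 2 = 2 packed sequences per device per optimizer step, each up to 16384 tokens long. A minimal annotated view of the updated values (comments added here for explanation, under the usual Axolotl semantics for these keys):

sequence_len: 16384             # maximum (packed) sequence length
sample_packing: true            # pack several short samples into each 16384-token sequence
pad_to_sequence_len: true       # pad the last pack so every batch has a fixed shape

gradient_accumulation_steps: 2  # accumulate two micro-batches before each optimizer step
micro_batch_size: 1             # one packed sequence per device per forward/backward pass
optimizer: paged_adamw_8bit     # paged 8-bit AdamW (bitsandbytes) to cut optimizer-state memory
learning_rate: 2e-5
lr_scheduler: cosine
max_grad_norm: 0.2              # tight gradient clipping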