Rename axolotl_config_idg9oo_8.yml to Henbane8B.yml
axolotl_config_idg9oo_8.yml → Henbane8B.yml
RENAMED
@@ -18,12 +18,6 @@ strict: false
 datasets:
   - path: Gryphe/Sonnet3.5-SlimOrcaDedupCleaned
     type: chat_template
-  - path: Nitral-AI/Cybersecurity-ShareGPT
-    type: chat_template
-  - path: Nitral-AI/Medical_Instruct-ShareGPT
-    type: chat_template
-  - path: Nitral-AI/Olympiad_Math-ShareGPT
-    type: chat_template
   - path: anthracite-org/kalo_opus_misc_240827
     type: chat_template
   - path: NewEden/Claude-Instruct-5k
@@ -43,7 +37,7 @@ shuffle_merged_datasets: true
 default_system_message: "You are an assistant that responds to the user."
 dataset_prepared_path: prepared_dataset_memorycore
 val_set_size: 0.0
-output_dir: ./
+output_dir: ./henbane-8b-llama3.1

 sequence_len: 8192
 sample_packing: true
@@ -64,12 +58,11 @@ wandb_watch:
 wandb_name: henbane-8b-r3
 wandb_log_model:

-gradient_accumulation_steps:
+gradient_accumulation_steps: 32
 micro_batch_size: 1
-num_epochs:
+num_epochs: 2
 optimizer: paged_adamw_8bit
 lr_scheduler: cosine
-#learning_rate: 3e-5
 learning_rate: 1e-5

 train_on_inputs: false
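For context: axolotl derives the effective batch size by multiplying micro_batch_size by gradient_accumulation_steps (and the GPU count), so the values filled in by this commit imply an effective batch of 32 per GPU. A minimal sanity-check sketch of that arithmetic, assuming a single GPU since the config does not pin a world size:

# Effective batch size implied by the updated Henbane8B.yml settings.
micro_batch_size = 1               # from the config
gradient_accumulation_steps = 32   # newly set in this commit
num_gpus = 1                       # assumption: world size is not in the config

effective_batch_size = micro_batch_size * gradient_accumulation_steps * num_gpus
print(f"effective batch size: {effective_batch_size}")  # -> 32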
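With the rename, a typical launch would reference the new filename, e.g. accelerate launch -m axolotl.cli.train Henbane8B.yml (assuming a standard accelerate-based axolotl install; the exact entrypoint may vary by version).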