Commit 9fc815a
Parent(s): 6b181a8

add model weights and checkpoints

Files changed:
- README.md +1 -1
- checkpoint-1560/config.json +46 -0
- checkpoint-1560/generation_config.json +6 -0
- checkpoint-1560/merges.txt +0 -0
- checkpoint-1560/model.safetensors +3 -0
- checkpoint-1560/optimizer.pt +3 -0
- checkpoint-1560/rng_state_0.pth +3 -0
- checkpoint-1560/rng_state_1.pth +3 -0
- checkpoint-1560/rng_state_2.pth +3 -0
- checkpoint-1560/rng_state_3.pth +3 -0
- checkpoint-1560/scaler.pt +3 -0
- checkpoint-1560/scheduler.pt +3 -0
- checkpoint-1560/special_tokens_map.json +6 -0
- checkpoint-1560/tokenizer.json +0 -0
- checkpoint-1560/tokenizer_config.json +21 -0
- checkpoint-1560/trainer_state.json +155 -0
- checkpoint-1560/training_args.bin +3 -0
- checkpoint-1560/vocab.json +0 -0
- checkpoint-780/config.json +46 -0
- checkpoint-780/generation_config.json +6 -0
- checkpoint-780/merges.txt +0 -0
- checkpoint-780/model.safetensors +3 -0
- checkpoint-780/optimizer.pt +3 -0
- checkpoint-780/rng_state_0.pth +3 -0
- checkpoint-780/rng_state_1.pth +3 -0
- checkpoint-780/rng_state_2.pth +3 -0
- checkpoint-780/rng_state_3.pth +3 -0
- checkpoint-780/scaler.pt +3 -0
- checkpoint-780/scheduler.pt +3 -0
- checkpoint-780/special_tokens_map.json +6 -0
- checkpoint-780/tokenizer.json +0 -0
- checkpoint-780/tokenizer_config.json +21 -0
- checkpoint-780/trainer_state.json +91 -0
- checkpoint-780/training_args.bin +3 -0
- checkpoint-780/vocab.json +0 -0
- model.safetensors +3 -0
- training_args.bin +0 -0
README.md
CHANGED
@@ -1,5 +1,5 @@
 ---
-license:
+license: OpenAI license
 datasets: Microsoft/ChatBench
 language: en
 base_model: distilgpt2
checkpoint-1560/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
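This config records the distilgpt2 architecture inherited from the base model: 6 transformer layers, 12 attention heads, 768-dimensional embeddings, a 1024-token context window, and the 50257-token GPT-2 vocabulary, with <|endoftext|> (id 50256) serving as BOS, EOS, and PAD. As a rough sketch (not part of the commit), the same shape can be rebuilt, or the checkpoint loaded, with the transformers library; the local directory name is assumed:

# Sketch only: rebuild the architecture described by config.json,
# or load the checkpoint directory directly (path assumed).
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    n_layer=6,            # distilgpt2: half of GPT-2's 12 layers
    n_head=12,
    n_embd=768,
    n_positions=1024,
    vocab_size=50257,
    bos_token_id=50256,
    eos_token_id=50256,
    pad_token_id=50256,
)
model = GPT2LMHeadModel(config)   # same shape, randomly initialised
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f}M parameters")  # ~81.9M

# Or load the trained weights from the checkpoint directory:
# model = GPT2LMHeadModel.from_pretrained("checkpoint-1560")

The ~81.9M parameter count is consistent with the 327,657,928-byte float32 safetensors file below.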
checkpoint-1560/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
checkpoint-1560/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-1560/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b51c0f6938c3da25cbbccf3db7389619eba378ee98d8fbd6ca00f989dd13614f
+size 327657928
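The weight, optimizer, and RNG-state files in this commit are stored through Git LFS, so each committed file is only a three-line pointer: the LFS spec version, the SHA-256 of the real object, and its size in bytes. A small sketch (not part of the commit) of checking a downloaded file against the pointer above; the local path is assumed:

# Sketch only: verify a downloaded LFS object against its pointer.
import hashlib
import os

expected_oid = "b51c0f6938c3da25cbbccf3db7389619eba378ee98d8fbd6ca00f989dd13614f"
expected_size = 327657928
path = "checkpoint-1560/model.safetensors"   # assumed local path

assert os.path.getsize(path) == expected_size, "size mismatch"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
assert digest.hexdigest() == expected_oid, "checksum mismatch"
print("LFS object matches its pointer")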
checkpoint-1560/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d2e661dbb7ea237a9a392114f7c0cf424fc010ed15834dd1933ba73980b9953
+size 655364939

checkpoint-1560/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b49d7668609aeebb500fa1f9afb3eb242466b4fc288d90e791e235195fefd448
+size 15429

checkpoint-1560/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39184abc08b038f5da3f9f30b07db28cae224bf02c07962f2960f6dc33c84c14
+size 15429

checkpoint-1560/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97f4efb4ef6fcd5a6880f687efbbd55da5527bb2c57882e593b4af4dbaca1043
+size 15429

checkpoint-1560/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee4cbc6a1b8f4decdd56929e40bbbf34e540159fc93cabde87ec65170c5e909b
+size 15429

checkpoint-1560/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:881e40e843583b1f863867f6c2a963ed359f55623e11a88129b36eba451469ab
+size 1383

checkpoint-1560/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bf56008cfe9be3e3897fb451f2c77d53e0cfca751cea61314cdba3f16c1d976
+size 1465
checkpoint-1560/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-1560/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-1560/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
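The tokenizer files reproduce the stock GPT-2 byte-level BPE tokenizer, with <|endoftext|> reused as BOS, EOS, UNK, and PAD. A brief sketch (not part of the commit) of loading it from the checkpoint directory; the path is assumed:

# Sketch only: load the saved tokenizer (path assumed).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-1560")
print(tok.pad_token, tok.pad_token_id)     # <|endoftext|> 50256
enc = tok("Hello world", return_tensors="pt")
print(enc["input_ids"])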
checkpoint-1560/trainer_state.json
ADDED
@@ -0,0 +1,155 @@
+{
+  "best_global_step": 780,
+  "best_metric": 1.4478473663330078,
+  "best_model_checkpoint": "results/models/distilgpt2_split_3/checkpoint-780",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 1560,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.1282051282051282,
+      "grad_norm": 2.1318576335906982,
+      "learning_rate": 4.689102564102564e-05,
+      "loss": 1.8462,
+      "step": 100
+    },
+    {
+      "epoch": 0.2564102564102564,
+      "grad_norm": 1.8293204307556152,
+      "learning_rate": 4.368589743589744e-05,
+      "loss": 1.5051,
+      "step": 200
+    },
+    {
+      "epoch": 0.38461538461538464,
+      "grad_norm": 2.407285690307617,
+      "learning_rate": 4.0480769230769236e-05,
+      "loss": 1.4097,
+      "step": 300
+    },
+    {
+      "epoch": 0.5128205128205128,
+      "grad_norm": 2.0177223682403564,
+      "learning_rate": 3.727564102564103e-05,
+      "loss": 1.3136,
+      "step": 400
+    },
+    {
+      "epoch": 0.6410256410256411,
+      "grad_norm": 1.5830180644989014,
+      "learning_rate": 3.4070512820512825e-05,
+      "loss": 1.2698,
+      "step": 500
+    },
+    {
+      "epoch": 0.7692307692307693,
+      "grad_norm": 1.6503092050552368,
+      "learning_rate": 3.0865384615384616e-05,
+      "loss": 1.2133,
+      "step": 600
+    },
+    {
+      "epoch": 0.8974358974358975,
+      "grad_norm": 1.608035922050476,
+      "learning_rate": 2.7660256410256413e-05,
+      "loss": 1.1817,
+      "step": 700
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.4478473663330078,
+      "eval_runtime": 14.1171,
+      "eval_samples_per_second": 370.259,
+      "eval_steps_per_second": 23.163,
+      "step": 780
+    },
+    {
+      "epoch": 1.0256410256410255,
+      "grad_norm": 1.399240493774414,
+      "learning_rate": 2.4455128205128204e-05,
+      "loss": 1.1556,
+      "step": 800
+    },
+    {
+      "epoch": 1.1538461538461537,
+      "grad_norm": 1.8416355848312378,
+      "learning_rate": 2.125e-05,
+      "loss": 1.1005,
+      "step": 900
+    },
+    {
+      "epoch": 1.282051282051282,
+      "grad_norm": 1.4166646003723145,
+      "learning_rate": 1.8044871794871796e-05,
+      "loss": 1.0782,
+      "step": 1000
+    },
+    {
+      "epoch": 1.4102564102564101,
+      "grad_norm": 1.3811384439468384,
+      "learning_rate": 1.483974358974359e-05,
+      "loss": 1.0568,
+      "step": 1100
+    },
+    {
+      "epoch": 1.5384615384615383,
+      "grad_norm": 1.5326135158538818,
+      "learning_rate": 1.1634615384615386e-05,
+      "loss": 1.0446,
+      "step": 1200
+    },
+    {
+      "epoch": 1.6666666666666665,
+      "grad_norm": 1.2560216188430786,
+      "learning_rate": 8.42948717948718e-06,
+      "loss": 1.0604,
+      "step": 1300
+    },
+    {
+      "epoch": 1.7948717948717947,
+      "grad_norm": 1.4168336391448975,
+      "learning_rate": 5.224358974358975e-06,
+      "loss": 1.0371,
+      "step": 1400
+    },
+    {
+      "epoch": 1.9230769230769231,
+      "grad_norm": 1.3679345846176147,
+      "learning_rate": 2.0192307692307692e-06,
+      "loss": 1.0274,
+      "step": 1500
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 1.4688283205032349,
+      "eval_runtime": 14.0926,
+      "eval_samples_per_second": 370.904,
+      "eval_steps_per_second": 23.204,
+      "step": 1560
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1560,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 6521966894776320.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
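The trainer state logs training loss every 100 steps over 2 epochs (1560 steps) and an evaluation at each epoch boundary; eval_loss rises from 1.4478 at step 780 to 1.4688 at step 1560, which is why checkpoint-780 is recorded as the best model. A sketch (not part of the commit) of pulling those metrics back out of the file; the path is assumed:

# Sketch only: read the logged metrics back out of trainer_state.json.
import json

with open("checkpoint-1560/trainer_state.json") as f:   # assumed local path
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("final training loss:", train_logs[-1]["loss"])
for e in eval_logs:
    print(f"step {e['step']:>4}  eval_loss={e['eval_loss']:.4f}")
print("best checkpoint:", state["best_model_checkpoint"],
      "| best metric:", state["best_metric"])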
checkpoint-1560/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8c32dd41a8369fb1de0d9c88ebf98453f3e9c428ee5572568b270ac107b3cdd
+size 5713
checkpoint-1560/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-780/config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "pad_token_id": 50256,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
checkpoint-780/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
checkpoint-780/merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-780/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61c73128d51531ed56973d2600c126914bb9c060a07668e27e954917d42bc7f0
+size 327657928
checkpoint-780/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:431cd49432f0ee23b154493cc4270a6c1108ddb405ccaa17082b620221beb343
+size 655364939

checkpoint-780/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f0513a2baa362555a739508f5bcf66bc226cb73980c16f91e1e4719af8acf5e
+size 15429

checkpoint-780/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24f81b34fbef8c58bad9b73383237ff3b398ca04efab5da30241d984cff9614e
+size 15429

checkpoint-780/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df45efa4b80d600b223e6c8723ba385ab51e1583537109bfbee205212d81958e
+size 15429

checkpoint-780/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf5814355331b202952d09c81426e73235c388647c762f28dc47dd06637e1443
+size 15429

checkpoint-780/scaler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ebc4f4eaf3317083143e7fd7aa804ff935640029ab9f1c0a6b07ac094c65427
+size 1383

checkpoint-780/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9413c54ddfa601f6097d71d5a210892c27a41aafd2f10fccfab127e437a9b27f
+size 1465
checkpoint-780/special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-780/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-780/tokenizer_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-780/trainer_state.json
ADDED
@@ -0,0 +1,91 @@
+{
+  "best_global_step": 780,
+  "best_metric": 1.4478473663330078,
+  "best_model_checkpoint": "results/models/distilgpt2_split_3/checkpoint-780",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 780,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.1282051282051282,
+      "grad_norm": 2.1318576335906982,
+      "learning_rate": 4.689102564102564e-05,
+      "loss": 1.8462,
+      "step": 100
+    },
+    {
+      "epoch": 0.2564102564102564,
+      "grad_norm": 1.8293204307556152,
+      "learning_rate": 4.368589743589744e-05,
+      "loss": 1.5051,
+      "step": 200
+    },
+    {
+      "epoch": 0.38461538461538464,
+      "grad_norm": 2.407285690307617,
+      "learning_rate": 4.0480769230769236e-05,
+      "loss": 1.4097,
+      "step": 300
+    },
+    {
+      "epoch": 0.5128205128205128,
+      "grad_norm": 2.0177223682403564,
+      "learning_rate": 3.727564102564103e-05,
+      "loss": 1.3136,
+      "step": 400
+    },
+    {
+      "epoch": 0.6410256410256411,
+      "grad_norm": 1.5830180644989014,
+      "learning_rate": 3.4070512820512825e-05,
+      "loss": 1.2698,
+      "step": 500
+    },
+    {
+      "epoch": 0.7692307692307693,
+      "grad_norm": 1.6503092050552368,
+      "learning_rate": 3.0865384615384616e-05,
+      "loss": 1.2133,
+      "step": 600
+    },
+    {
+      "epoch": 0.8974358974358975,
+      "grad_norm": 1.608035922050476,
+      "learning_rate": 2.7660256410256413e-05,
+      "loss": 1.1817,
+      "step": 700
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.4478473663330078,
+      "eval_runtime": 14.1171,
+      "eval_samples_per_second": 370.259,
+      "eval_steps_per_second": 23.163,
+      "step": 780
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1560,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3260983447388160.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-780/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8c32dd41a8369fb1de0d9c88ebf98453f3e9c428ee5572568b270ac107b3cdd
+size 5713
checkpoint-780/vocab.json
ADDED
The diff for this file is too large to render. See raw diff.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61c73128d51531ed56973d2600c126914bb9c060a07668e27e954917d42bc7f0
+size 327657928
training_args.bin
CHANGED
Binary files a/training_args.bin and b/training_args.bin differ
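Note that the root-level model.safetensors carries the same SHA-256 as checkpoint-780/model.safetensors, so the exported weights correspond to the best (epoch-1) checkpoint rather than the final step. A closing sketch (not part of the commit) of running generation with the uploaded model; the repository id "user/distilgpt2-chatbench" is a placeholder, and it assumes the repo root also carries the config and tokenizer files:

# Sketch only: generate text with the fine-tuned distilgpt2 weights.
# "user/distilgpt2-chatbench" is a placeholder repo id, not the real one.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "user/distilgpt2-chatbench"
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)
model.eval()

inputs = tok("Hello, I have a question about", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=50, do_sample=True,
                         pad_token_id=tok.eos_token_id)
print(tok.decode(out[0], skip_special_tokens=True))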