PyTorch · GLiNER · Bavarian
File size: 4,557 bytes · commit 897b599
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7657554177195803,
  "eval_steps": 5000,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03828777088597902,
      "grad_norm": 471.1349792480469,
      "learning_rate": 3e-06,
      "loss": 205.1221,
      "step": 500
    },
    {
      "epoch": 0.07657554177195804,
      "grad_norm": 344.47088623046875,
      "learning_rate": 6e-06,
      "loss": 45.0922,
      "step": 1000
    },
    {
      "epoch": 0.11486331265793706,
      "grad_norm": 309.480224609375,
      "learning_rate": 9e-06,
      "loss": 38.0229,
      "step": 1500
    },
    {
      "epoch": 0.15315108354391607,
      "grad_norm": 564.2250366210938,
      "learning_rate": 1.2e-05,
      "loss": 33.9726,
      "step": 2000
    },
    {
      "epoch": 0.19143885442989508,
      "grad_norm": 283.4784851074219,
      "learning_rate": 1.5e-05,
      "loss": 29.991,
      "step": 2500
    },
    {
      "epoch": 0.22972662531587412,
      "grad_norm": 330.930419921875,
      "learning_rate": 1.8e-05,
      "loss": 30.7885,
      "step": 3000
    },
    {
      "epoch": 0.26801439620185313,
      "grad_norm": 342.97344970703125,
      "learning_rate": 2.1e-05,
      "loss": 25.1332,
      "step": 3500
    },
    {
      "epoch": 0.30630216708783214,
      "grad_norm": 231.11172485351562,
      "learning_rate": 2.4e-05,
      "loss": 21.5112,
      "step": 4000
    },
    {
      "epoch": 0.34458993797381116,
      "grad_norm": 706.323974609375,
      "learning_rate": 2.7000000000000002e-05,
      "loss": 19.5362,
      "step": 4500
    },
    {
      "epoch": 0.38287770885979017,
      "grad_norm": 205.98391723632812,
      "learning_rate": 3e-05,
      "loss": 18.0186,
      "step": 5000
    },
    {
      "epoch": 0.38287770885979017,
      "eval_loss": 52.25505447387695,
      "eval_runtime": 7.8963,
      "eval_samples_per_second": 1470.058,
      "eval_steps_per_second": 183.757,
      "step": 5000
    },
    {
      "epoch": 0.4211654797457692,
      "grad_norm": 237.13412475585938,
      "learning_rate": 2.9997949574887035e-05,
      "loss": 16.9354,
      "step": 5500
    },
    {
      "epoch": 0.45945325063174824,
      "grad_norm": 117.44111633300781,
      "learning_rate": 2.999179886011389e-05,
      "loss": 16.263,
      "step": 6000
    },
    {
      "epoch": 0.49774102151772726,
      "grad_norm": 419.0275573730469,
      "learning_rate": 2.9981549537224573e-05,
      "loss": 15.3433,
      "step": 6500
    },
    {
      "epoch": 0.5360287924037063,
      "grad_norm": 305.9416809082031,
      "learning_rate": 2.9967204408281618e-05,
      "loss": 13.5674,
      "step": 7000
    },
    {
      "epoch": 0.5743165632896853,
      "grad_norm": 373.90185546875,
      "learning_rate": 2.994876739510005e-05,
      "loss": 13.2024,
      "step": 7500
    },
    {
      "epoch": 0.6126043341756643,
      "grad_norm": 142.22088623046875,
      "learning_rate": 2.9926243538175172e-05,
      "loss": 13.7021,
      "step": 8000
    },
    {
      "epoch": 0.6508921050616433,
      "grad_norm": 134.59779357910156,
      "learning_rate": 2.9899638995304575e-05,
      "loss": 13.2536,
      "step": 8500
    },
    {
      "epoch": 0.6891798759476223,
      "grad_norm": 289.6990966796875,
      "learning_rate": 2.9868961039904628e-05,
      "loss": 13.1385,
      "step": 9000
    },
    {
      "epoch": 0.7274676468336013,
      "grad_norm": 529.670166015625,
      "learning_rate": 2.9834218059022027e-05,
      "loss": 12.3703,
      "step": 9500
    },
    {
      "epoch": 0.7657554177195803,
      "grad_norm": 162.87234497070312,
      "learning_rate": 2.9795419551040836e-05,
      "loss": 12.2421,
      "step": 10000
    },
    {
      "epoch": 0.7657554177195803,
      "eval_loss": 34.264163970947266,
      "eval_runtime": 7.8743,
      "eval_samples_per_second": 1474.162,
      "eval_steps_per_second": 184.27,
      "step": 10000
    }
  ],
  "logging_steps": 500,
  "max_steps": 100000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
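
The file above appears to be the trainer state JSON that the Hugging Face transformers Trainer writes alongside each checkpoint: log_history interleaves training entries (loss, grad_norm, learning_rate) with evaluation entries (eval_loss and throughput), logged every 500 steps and evaluated every 5,000. Below is a minimal sketch for inspecting it; the filename "trainer_state.json", the warmup length (5,000 steps), and the peak learning rate (3e-05) are assumptions inferred from the logged values, not fields stated in the file.

import json
import math

# Load the trainer state shown above. The filename "trainer_state.json"
# is an assumption based on the usual Trainer checkpoint layout.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (keyed by "loss") and evaluation
# entries (keyed by "eval_loss"); split them apart.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"{'step':>6}  {'train loss':>10}  {'logged lr':>12}")
for e in train_log:
    print(f"{e['step']:>6}  {e['loss']:>10.4f}  {e['learning_rate']:>12.4e}")

print(f"\n{'step':>6}  {'eval loss':>10}")
for e in eval_log:
    print(f"{e['step']:>6}  {e['eval_loss']:>10.4f}")

# Check an assumed schedule against the logged learning rates: linear
# warmup to a peak of 3e-05 over the first 5,000 steps, then cosine
# decay to zero at max_steps. Both constants are inferred from the log,
# not read from the file.
PEAK_LR, WARMUP = 3e-05, 5_000
MAX_STEPS = state["max_steps"]  # 100000

def expected_lr(step: int) -> float:
    if step < WARMUP:
        return PEAK_LR * step / WARMUP
    progress = (step - WARMUP) / (MAX_STEPS - WARMUP)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

for e in train_log:
    assert math.isclose(expected_lr(e["step"]), e["learning_rate"],
                        rel_tol=1e-6), f"schedule mismatch at step {e['step']}"

The learning rates logged above are consistent with this check: linear warmup over the first 5,000 steps followed by cosine decay toward zero at max_steps = 100000. The checkpoint sits at step 10,000 of 100,000 (epoch 0.77 of 8), with eval loss improving from 52.26 at step 5,000 to 34.26 at step 10,000.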