Text Generation
Safetensors
English
Chinese
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "fa3b9b85-762d-4a0b-a0f2-70c9a6aa59b9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import torch\n",
    "from safetensors import safe_open\n",
    "from model import Transformer as Model\n",
    "from transformers import PreTrainedTokenizerFast\n",
    "\n",
    "with open(\"./config.json\", \"r\") as f:\n",
    "    config = json.load(f)\n",
    "\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "torch.set_default_device(device)\n",
    "\n",
    "model = Model(**config)\n",
    "model.zero_grad()\n",
    "model.bfloat16()\n",
    "\n",
    "saved_states = {}\n",
    "with safe_open(\"./model.safetensors\", framework=\"pt\", device=device) as f:\n",
    "    for key in f.keys():\n",
    "        saved_states[key] = f.get_tensor(key)\n",
    "model.load_state_dict(saved_states)\n",
    "model.eval()\n",
    "\n",
    "tokenizer = PreTrainedTokenizerFast.from_pretrained(\"./\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "fe7449b5-0993-40e5-89f3-d9f0e5196839",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<s> def fibonacci(n: int):\n",
      "    if n < 2:\n",
      "        return n\n",
      "    return fibonacci(n - 1) + fibonacci(n - 2)\n",
      "\n",
      "\n",
      "def fibonacci_recursive(n: int):\n",
      "    if n < 2:\n",
      "        return n\n",
      "    return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)\n",
      "\n",
      "\n",
      "def fibonacci_iterative(n: int):\n",
      "    if n < 2:\n",
      "        return n\n",
      "    return fibonacci_iterative"
     ]
    }
   ],
   "source": [
    "tokens = tokenizer('''def fibonacci(n: int):''')['input_ids']\n",
    "\n",
    "current = tokenizer.decode(tokens)\n",
    "print(current, end=\"\")\n",
    "\n",
    "temperature = 1e-4\n",
    "\n",
    "for _ in range(128):\n",
    "\n",
    "    tok = torch.tensor(tokens).reshape(1, -1)\n",
    "    \n",
    "    logits = model(tok)\n",
    "\n",
    "    nxt = torch.multinomial(torch.softmax(logits[:, -1].float()/temperature, dim=-1).squeeze(), num_samples=1).item()\n",
    "    \n",
    "    tokens += [nxt]\n",
    "\n",
    "    print(tokenizer.decode(tokens).replace(current, \"\", 1), end=\"\")\n",
    "    \n",
    "    current = tokenizer.decode(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f053099d-acf4-4432-9c27-1a0f9d9352c1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<s> Digits of pi:\n",
      "\n",
      "# What is the value of pi?\n",
      "\n",
      "## What is the value of pi?\n",
      "\n",
      "The value of pi is 3.14159265358979323846273284627328462732846273284627328462732846273284627328462732846273284627328462"
     ]
    }
   ],
   "source": [
    "tokens = tokenizer('''Digits of pi:''')['input_ids']\n",
    "\n",
    "current = tokenizer.decode(tokens)\n",
    "print(current, end=\"\")\n",
    "\n",
    "temperature = 1e-4\n",
    "\n",
    "for _ in range(128):\n",
    "\n",
    "    tok = torch.tensor(tokens).reshape(1, -1)\n",
    "    \n",
    "    logits = model(tok)\n",
    "\n",
    "    nxt = torch.multinomial(torch.softmax(logits[:, -1].float()/temperature, dim=-1).squeeze(), num_samples=1).item()\n",
    "    \n",
    "    tokens += [nxt]\n",
    "\n",
    "    print(tokenizer.decode(tokens).replace(current, \"\", 1), end=\"\")\n",
    "    \n",
    "    current = tokenizer.decode(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "aca6f531-0267-44dc-a632-8d249c0bf3fd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<s> The chemical formula for water is H2O. What does it mean?\n",
      "The chemical formula for water is H2O. What does it mean?\n",
      "Water is the purest liquid on Earth. It is the basis of life. Water is found in the soil, rivers, lakes, oceans, and the ocean. Water is also found in our bodies. Water is found in everything we take in on a daily basis. Water is essential for life. Water is found in the cells, tissues, and organs of all living things. Water is a key element of life because it enables the creation and maintenance of the various chemical and physical processes"
     ]
    }
   ],
   "source": [
    "tokens = tokenizer('''The chemical formula for water is''')['input_ids']\n",
    "\n",
    "current = tokenizer.decode(tokens)\n",
    "print(current, end=\"\")\n",
    "\n",
    "temperature = 7e-1\n",
    "\n",
    "for _ in range(128):\n",
    "\n",
    "    tok = torch.tensor(tokens).reshape(1, -1)\n",
    "    \n",
    "    logits = model(tok)\n",
    "\n",
    "    nxt = torch.multinomial(torch.softmax(logits[:, -1].float()/temperature, dim=-1).squeeze(), num_samples=1).item()\n",
    "    \n",
    "    tokens += [nxt]\n",
    "\n",
    "    print(tokenizer.decode(tokens).replace(current, \"\", 1), end=\"\")\n",
    "    \n",
    "    current = tokenizer.decode(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "72220140-d4c3-4f4a-9d8a-0677e66d8b05",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<s> The purpose of life is to build up the body’s strength, endurance, and energy reserves through the accumulation of acquired skills, and to get rid of worn or damaged parts of the body. All of this depends on day’s activities and deeds. The process of building up the body and taking on new challenges, such as accumulating health, will require the use of skills and abilities.\n",
      "The main purpose of building up skills and abilities in life is to make new people capable of doing the things that they need to do. This process requires you to develop skills that are applicable to everyday life. Skills can either be formal, or in the"
     ]
    }
   ],
   "source": [
    "tokens = tokenizer('''The purpose of life is to''')['input_ids']\n",
    "\n",
    "current = tokenizer.decode(tokens)\n",
    "print(current, end=\"\")\n",
    "\n",
    "temperature = 8e-1\n",
    "\n",
    "for _ in range(128):\n",
    "\n",
    "    tok = torch.tensor(tokens).reshape(1, -1)\n",
    "    \n",
    "    logits = model(tok)\n",
    "\n",
    "    nxt = torch.multinomial(torch.softmax(logits[:, -1].float()/temperature, dim=-1).squeeze(), num_samples=1).item()\n",
    "    \n",
    "    tokens += [nxt]\n",
    "\n",
    "    print(tokenizer.decode(tokens).replace(current, \"\", 1), end=\"\")\n",
    "    \n",
    "    current = tokenizer.decode(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "a6f71778-01e8-40b0-b4c0-ef3e7e934e66",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<s> Charles Darwin: The Origin of Species\n",
      "Suggested Citation: (Author's description, 2007-08-02)\n",
      "In the early 1900s the scientific community found that Darwin's theories would provide a mechanism for the further evolutionary history of living beings, assuming there was not been a series of intelligent, representative \"histors\" of life. Through careful research, Darwin's theory of the Origin of Species proved to be compatible with a single evolutionary process, that of speciation (Darwin, 1886). In other words, Darwin had"
     ]
    }
   ],
   "source": [
    "tokens = tokenizer('''Charles Darwin''')['input_ids']\n",
    "\n",
    "current = tokenizer.decode(tokens)\n",
    "print(current, end=\"\")\n",
    "\n",
    "temperature = 8e-1\n",
    "\n",
    "for _ in range(128):\n",
    "\n",
    "    tok = torch.tensor(tokens).reshape(1, -1)\n",
    "    \n",
    "    logits = model(tok)\n",
    "\n",
    "    nxt = torch.multinomial(torch.softmax(logits[:, -1].float()/temperature, dim=-1).squeeze(), num_samples=1).item()\n",
    "    \n",
    "    tokens += [nxt]\n",
    "\n",
    "    print(tokenizer.decode(tokens).replace(current, \"\", 1), end=\"\")\n",
    "    \n",
    "    current = tokenizer.decode(tokens)"
   ]
  },
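  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b1f2c40-sample-helper-sketch",
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal helper sketched from the sampling loop repeated in the cells above.\n",
    "# The function name and defaults are illustrative; only tokenizer, model, and torch\n",
    "# from the first cell are assumed.\n",
    "def generate(prompt: str, temperature: float = 8e-1, max_new_tokens: int = 128) -> str:\n",
    "    tokens = tokenizer(prompt)['input_ids']\n",
    "    for _ in range(max_new_tokens):\n",
    "        tok = torch.tensor(tokens).reshape(1, -1)\n",
    "        logits = model(tok)\n",
    "        # Sample from the temperature-scaled softmax over the last position's logits.\n",
    "        probs = torch.softmax(logits[:, -1].float() / temperature, dim=-1).squeeze()\n",
    "        tokens += [torch.multinomial(probs, num_samples=1).item()]\n",
    "    return tokenizer.decode(tokens)\n",
    "\n",
    "# Example: generate('The chemical formula for water is', temperature=7e-1)"
   ]
  },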
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a6e1068-b3a8-4019-9e31-5336a337a25a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}