Update app.py
app.py
CHANGED
@@ -33,12 +33,12 @@ def generate_text(message, history, max_tokens=512, temperature=0.9, top_p=0.95)
     temp = ""
     input_texts = [llama_o1_template(message)]
     input_texts = [input_text.replace('<|end_of_text|>','') for input_text in input_texts]
-    print(f"input_texts[0]: {input_texts[0]}")
+    #print(f"input_texts[0]: {input_texts[0]}")
     inputs = model.tokenize(input_texts[0].encode('utf-8'))
     for token in model.generate(inputs, top_p=top_p, temp=temperature):
-        print(f"token: {token}")
+        #print(f"token: {token}")
         text = model.detokenize([token])
-        print(f"text detok: {text}")
+        #print(f"text detok: {text}")
         temp += text.decode('utf-8')
         yield temp
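For context, this is roughly how the streaming generator reads after the change, as a minimal self-contained sketch assuming a llama-cpp-python backend. The model path, the body of llama_o1_template, and the end-of-sequence / token-budget check are illustrative assumptions and are not part of this diff.

from llama_cpp import Llama

# Hypothetical model load; the real app configures this elsewhere in app.py.
model = Llama(model_path="model.gguf", n_ctx=2048)

def llama_o1_template(message):
    # Placeholder prompt template; the actual format is defined elsewhere in app.py.
    return f"<|begin_of_text|>{message}"

def generate_text(message, history, max_tokens=512, temperature=0.9, top_p=0.95):
    temp = ""
    input_texts = [llama_o1_template(message)]
    input_texts = [t.replace('<|end_of_text|>', '') for t in input_texts]
    inputs = model.tokenize(input_texts[0].encode('utf-8'))
    for i, token in enumerate(model.generate(inputs, top_p=top_p, temp=temperature)):
        # Assumed stop conditions (not shown in the diff): end-of-sequence token or token budget.
        if token == model.token_eos() or i >= max_tokens:
            break
        text = model.detokenize([token])
        temp += text.decode('utf-8')
        yield temp  # stream the accumulated text so the UI updates per token

Commenting out the three print calls leaves this per-token flow unchanged while avoiding a console write for every generated token.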