Commit 18463e3
Parent(s): 0196875
Update app.py

app.py CHANGED
@@ -13,11 +13,20 @@ I am an assistant who thinks through their answers step-by-step to be sure I alw
 I think more clearly if I write out my thought process in a scratchpad manner first; therefore, I always explain background context, assumptions, and step-by-step thinking BEFORE trying to answer a question."""
 
 def make_prediction(prompt, max_tokens=None, temperature=None, top_p=None, top_k=None, repetition_penalty=None):
-    completion = openai.Completion.create(
+    completion = openai.Completion.create(
+        model="Open-Orca/Mistral-7B-OpenOrca",
+        prompt=prompt,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
+        repetition_penalty=repetition_penalty,
+        stream=True,
+        stop=["<|im_end|>", "<|im_start|>"]  # Set your custom stop tokens here
+    )
     for chunk in completion:
         yield chunk["choices"][0]["text"]
 
-
 def clear_chat(chat_history_state, chat_message):
     chat_history_state = []
     chat_message = ''
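
For context, the updated make_prediction is a generator: with stream=True, openai.Completion.create returns chunks, and each chunk's choices[0]["text"] delta is yielded to the caller. Below is a minimal consumption sketch, not taken from app.py: the endpoint URL, API key, prompt text, and sampling values are all assumptions, and the ChatML-style <|im_start|>/<|im_end|> markup is only inferred from the stop tokens in the diff.

# Minimal usage sketch. Assumptions: an OpenAI-compatible server hosting
# Open-Orca/Mistral-7B-OpenOrca is reachable at the URL below, and the
# make_prediction generator from app.py above is in scope.
import openai

openai.api_key = "EMPTY"                      # many OpenAI-compatible servers ignore the key
openai.api_base = "http://localhost:8000/v1"  # assumed endpoint, not taken from app.py

# ChatML-style prompt, matching the <|im_start|>/<|im_end|> stop tokens in the diff.
prompt = (
    "<|im_start|>system\n"
    "You are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\n"
    "Explain streaming inference in one sentence.<|im_end|>\n"
    "<|im_start|>assistant\n"
)

answer = ""
for text_delta in make_prediction(prompt, max_tokens=256, temperature=0.7,
                                  top_p=0.9, top_k=40, repetition_penalty=1.1):
    answer += text_delta                      # each yielded chunk is a partial text delta
    print(text_delta, end="", flush=True)     # stream tokens to the console as they arrive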