Update app.py
app.py CHANGED
@@ -39,7 +39,7 @@ def load_models(inp):
     return out_box[0],out_box[1],out_box[2],out_box[3]


-def format_prompt(message, history):
+def format_prompt_gemma(message, history):
     prompt = ""
     if history:
         #<start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
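This hunk only renames the function: the generic format_prompt becomes format_prompt_gemma, because a second, Mixtral-specific formatter is added below. The diff context hides the history loop (original lines 46-48), so the following self-contained sketch fills it in with an assumed body, consistent with the visible lines and with the Gemma turn markers shown in the line-45 comment:

def format_prompt_gemma(message, history):
    prompt = ""
    if history:
        # e.g. <start_of_turn>userHow does the brain work?<end_of_turn><start_of_turn>model
        for user_prompt, bot_response in history:
            # assumed loop body; these lines are elided from the diff context
            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn><start_of_turn>model"
            prompt += f"{bot_response}\n"
    prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
    return prompt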
@@ -49,9 +49,27 @@ def format_prompt(message, history):
         prompt += f"{bot_response}\n"
     print(prompt)
     prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
-    print(prompt)
     return prompt

+
+def format_prompt_mixtral(message, history):
+    prompt = "<s>"
+    if history:
+
+        for user_prompt, bot_response in history:
+            prompt += f"[INST] {user_prompt} [/INST]"
+            prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+def format_prompt_choose(message, history, model_name):
+    if "gemma" in model_name.lower():
+        return format_prompt_gemma(message,history)
+    if "mixtral" in model_name.lower():
+        return format_prompt_mixtral(message,history)
+    else:
+        return format_prompt_mixtral(message,history)
+
+

 mega_hist=[[],[],[],[]]
 def chat_inf_tree(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p,rep_p,hid_val):
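format_prompt_mixtral wraps each history turn in Mixtral's <s>[INST] ... [/INST] template, and format_prompt_choose dispatches on the model name; note that since both the "mixtral" branch and the final else return the Mixtral format, any non-Gemma model falls through to format_prompt_mixtral. A quick demonstration of the added formatter (the body is copied from the hunk above; the sample history is made up):

def format_prompt_mixtral(message, history):
    prompt = "<s>"
    if history:
        for user_prompt, bot_response in history:
            prompt += f"[INST] {user_prompt} [/INST]"
            prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

print(format_prompt_mixtral("And the heart?", [("How does the brain work?", "With neurons.")]))
# <s>[INST] How does the brain work? [/INST] With neurons.</s> [INST] And the heart? [/INST]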
@@ -99,7 +117,7 @@ def chat_inf_a(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         seed=seed,
     )
     #formatted_prompt=prompt
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", history, client_choice[0])
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
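Note the first argument: the system prompt is joined to the user prompt with a comma rather than given a turn of its own, so an empty system prompt produces a leading ", " in the formatted message:

system_prompt, prompt = "", "Hello"
print(f"{system_prompt}, {prompt}")  # ", Hello" when no system prompt is set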
@@ -126,7 +144,7 @@ def chat_inf_b(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         seed=seed,
     )
     #formatted_prompt=prompt
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", history, client_choice[1])
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
@@ -152,7 +170,7 @@ def chat_inf_c(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         seed=seed,
     )
     #formatted_prompt=prompt
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", history, client_choice[2])
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
@@ -178,7 +196,7 @@ def chat_inf_d(system_prompt,prompt,history,client_choice,seed,temp,tokens,top_p
         seed=seed,
     )
     #formatted_prompt=prompt
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", history, client_choice[3])
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
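The same one-line change repeats across chat_inf_a through chat_inf_d, each indexing client_choice with its own slot so the prompt template matches that handler's model. A minimal sketch of the call path under huggingface_hub's InferenceClient (the model name and generation settings here are illustrative, not taken from the Space):

from huggingface_hub import InferenceClient

client_choice = ["mistralai/Mixtral-8x7B-Instruct-v0.1"]  # hypothetical selection
client = InferenceClient(client_choice[0])

system_prompt, prompt = "You are terse.", "Hello!"
# mirrors the changed line in chat_inf_a, with an empty history
formatted_prompt = format_prompt_choose(f"{system_prompt}, {prompt}", [], client_choice[0])
stream = client.text_generation(formatted_prompt, max_new_tokens=64,
                                stream=True, details=True, return_full_text=False)
output = ""
for response in stream:
    output += response.token.text  # accumulate the streamed tokens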
|