Update to handle Apriel-1.5-15b format and multiple endpoints list
app.py
CHANGED
@@ -1,4 +1,4 @@
-import
+import random
 from uuid import uuid4
 
 from openai import OpenAI
@@ -6,14 +6,15 @@ import gradio as gr
 
 from theme import apriel
 from utils import COMMUNITY_POSTFIX_URL, get_model_config, check_format, models_config, \
-    logged_event_handler, DEBUG_MODEL, log_debug, log_info, log_error
+    logged_event_handler, DEBUG_MODE, DEBUG_MODEL, log_debug, log_info, log_error
 from log_chat import log_chat
 
 MODEL_TEMPERATURE = 0.8
 BUTTON_WIDTH = 160
-DEFAULT_OPT_OUT_VALUE =
+DEFAULT_OPT_OUT_VALUE = DEBUG_MODE
 
-
+# If DEBUG_MODEL is True, use an alternative model (without reasoning) for testing
+DEFAULT_MODEL_NAME = "Apriel-1.5-15B-thinker" if not DEBUG_MODEL else "Apriel-1.5-15B-thinker"  # "Apriel-5b"
 
 BUTTON_ENABLED = gr.update(interactive=True)
 BUTTON_DISABLED = gr.update(interactive=False)
@@ -31,6 +32,9 @@ chat_start_count = 0
 model_config = {}
 openai_client = None
 
+USE_RANDOM_ENDPOINT = False
+endpoint_rotation_count = 0
+
 
 def app_loaded(state, request: gr.Request):
     message_html = setup_model(DEFAULT_MODEL_NAME, intial=False)
@@ -45,21 +49,28 @@ def update_model_and_clear_chat(model_name):
     return desc, []
 
 
-def setup_model(
-    global model_config, openai_client
-    model_config = get_model_config(
+def setup_model(model_key, intial=False):
+    global model_config, openai_client, endpoint_rotation_count
+    model_config = get_model_config(model_key)
     log_debug(f"update_model() --> Model config: {model_config}")
+
+    url_list = (model_config.get('VLLM_API_URL_LIST') or "").split(",")
+    if USE_RANDOM_ENDPOINT:
+        base_url = random.choice(url_list) if len(url_list) > 0 else model_config.get('VLLM_API_URL')
+    else:
+        base_url = url_list[endpoint_rotation_count % len(url_list)]
+        endpoint_rotation_count += 1
+
     openai_client = OpenAI(
         api_key=model_config.get('AUTH_TOKEN'),
-        base_url=
+        base_url=base_url
     )
+    log_debug(f"Switched to model {model_key} using endpoint {base_url}")
 
     _model_hf_name = model_config.get("MODEL_HF_URL").split('https://huggingface.co/')[1]
     _link = f"<a href='{model_config.get('MODEL_HF_URL')}{COMMUNITY_POSTFIX_URL}' target='_blank'>{_model_hf_name}</a>"
     _description = f"We'd love to hear your thoughts on the model. Click here to provide feedback - {_link}"
 
-    log_debug(f"Switched to model {_model_hf_name}")
-
     if intial:
         return
     else:
@@ -95,6 +106,9 @@ def run_chat_inference(history, message, state):
     error = None
     model_name = model_config.get('MODEL_NAME')
 
+    # Reinitialize the OpenAI client with a random endpoint from the list
+    setup_model(model_config.get('MODEL_KEY'))
+
    if len(history) == 0:
        state["chat_id"] = uuid4().hex
 
@@ -187,8 +201,9 @@ def run_chat_inference(history, message, state):
                log_debug(f"chat_fn() --> Stopping streaming...")
                break  # Exit the loop if the stop flag is set
            # Extract the new content from the delta field
-           content = getattr(chunk.choices[0].delta, "content", "")
-
+           content = getattr(chunk.choices[0].delta, "content", "") or ""
+           reasoning_content = getattr(chunk.choices[0].delta, "reasoning_content", "") or ""
+           output += reasoning_content + content
 
            if is_reasoning:
                parts = output.split("[BEGIN FINAL RESPONSE]")
@@ -198,8 +213,12 @@ def run_chat_inference(history, message, state):
                    parts[1] = parts[1].replace("[END FINAL RESPONSE]", "")
                if parts[1].endswith("[END FINAL RESPONSE]\n<|end|>"):
                    parts[1] = parts[1].replace("[END FINAL RESPONSE]\n<|end|>", "")
+               if parts[1].endswith("[END FINAL RESPONSE]\n<|end|>\n"):
+                   parts[1] = parts[1].replace("[END FINAL RESPONSE]\n<|end|>\n", "")
                if parts[1].endswith("<|end|>"):
                    parts[1] = parts[1].replace("<|end|>", "")
+               if parts[1].endswith("<|end|>\n"):
+                   parts[1] = parts[1].replace("<|end|>\n", "")
 
                history[-1 if not completion_started else -2] = gr.ChatMessage(
                    role="assistant",
@@ -220,6 +239,8 @@ def run_chat_inference(history, message, state):
        else:
            if output.endswith("<|end|>"):
                output = output.replace("<|end|>", "")
+           if output.endswith("<|end|>\n"):
+               output = output.replace("<|end|>\n", "")
            history[-1] = gr.ChatMessage(
                role="assistant",
                content=output
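For context, here is a minimal sketch of the endpoint-selection logic that the new setup_model() implements, pulled out as a standalone function. The helper name pick_endpoint and the filtering of empty entries are additions here, not part of the commit: the committed code splits the raw VLLM_API_URL_LIST string directly, so an unset list variable yields [""] and the VLLM_API_URL fallback branch never triggers.

    import random

    USE_RANDOM_ENDPOINT = False
    endpoint_rotation_count = 0


    def pick_endpoint(url_list_env, fallback_url):
        """Pick a vLLM base URL: random when USE_RANDOM_ENDPOINT, else round-robin.

        Hypothetical helper mirroring setup_model(); empty entries are
        filtered so a missing VLLM_API_URL_LIST falls back to VLLM_API_URL.
        """
        global endpoint_rotation_count
        urls = [u.strip() for u in (url_list_env or "").split(",") if u.strip()]
        if not urls:
            return fallback_url
        if USE_RANDOM_ENDPOINT:
            return random.choice(urls)
        url = urls[endpoint_rotation_count % len(urls)]
        endpoint_rotation_count += 1
        return url


    # Round-robin example: successive calls alternate between the two hosts.
    # Because run_chat_inference() now calls setup_model() per request, each
    # chat turn can land on a different endpoint.
    print(pick_endpoint("http://host-a:8000/v1,http://host-b:8000/v1", None))  # host-a
    print(pick_endpoint("http://host-a:8000/v1,http://host-b:8000/v1", None))  # host-b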
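The other half of the commit is the format handling: the Apriel-1.5 endpoint streams its chain-of-thought in a separate reasoning_content delta field and brackets the final answer with [BEGIN FINAL RESPONSE] / [END FINAL RESPONSE] plus an <|end|> terminator, which the new stripping branches above clean up. A minimal sketch of that post-processing, assuming only the marker strings visible in the diff (the helper name split_final_response is made up here):

    def split_final_response(output):
        """Split streamed text into (reasoning, final_response), dropping markers."""
        parts = output.split("[BEGIN FINAL RESPONSE]")
        reasoning = parts[0]
        final = parts[1] if len(parts) > 1 else ""
        # Strip markers longest-first so partial overlaps are removed cleanly.
        for marker in ("[END FINAL RESPONSE]\n<|end|>\n", "[END FINAL RESPONSE]\n<|end|>",
                       "[END FINAL RESPONSE]", "<|end|>\n", "<|end|>"):
            final = final.replace(marker, "")
        return reasoning.strip(), final.strip()


    reasoning, answer = split_final_response(
        "Let me think...\n[BEGIN FINAL RESPONSE]\n42\n[END FINAL RESPONSE]\n<|end|>\n"
    )
    # reasoning == "Let me think...", answer == "42"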
utils.py
CHANGED
@@ -12,27 +12,41 @@ DEBUG_MODE = False or os.environ.get("DEBUG_MODE") == "True"
 DEBUG_MODEL = False or os.environ.get("DEBUG_MODEL") == "True"
 
 models_config = {
-    "Apriel-
-        "MODEL_DISPLAY_NAME": "Apriel-
-        "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-
-        "MODEL_NAME": os.environ.get("
-        "VLLM_API_URL": os.environ.get("
+    "Apriel-1.5-15B-thinker": {
+        "MODEL_DISPLAY_NAME": "Apriel-1.5-15B-thinker",
+        "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-1.5-15b-Thinker",
+        "MODEL_NAME": os.environ.get("MODEL_NAME_APRIEL_1_5_15B"),
+        "VLLM_API_URL": os.environ.get("VLLM_API_URL_APRIEL_1_5_15B"),
+        "VLLM_API_URL_LIST": os.environ.get("VLLM_API_URL_LIST_APRIEL_1_5_15B"),
         "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
-        "REASONING": True
+        "REASONING": True,
+        "MULTIMODAL": True
     },
-    "Apriel-
-
-
-
-
-
-
+    # "Apriel-Nemotron-15b-Thinker": {
+    #     "MODEL_DISPLAY_NAME": "Apriel-Nemotron-15b-Thinker",
+    #     "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-Nemotron-15b-Thinker",
+    #     "MODEL_NAME": os.environ.get("MODEL_NAME_NEMO_15B"),
+    #     "VLLM_API_URL": os.environ.get("VLLM_API_URL_NEMO_15B"),
+    #     "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
+    #     "REASONING": True,
+    #     "MULTIMODAL": False
+    # },
+    # "Apriel-5b": {
+    #     "MODEL_DISPLAY_NAME": "Apriel-5b",
+    #     "MODEL_HF_URL": "https://huggingface.co/ServiceNow-AI/Apriel-5B-Instruct",
+    #     "MODEL_NAME": os.environ.get("MODEL_NAME_5B"),
+    #     "VLLM_API_URL": os.environ.get("VLLM_API_URL_5B"),
+    #     "AUTH_TOKEN": os.environ.get("AUTH_TOKEN"),
+    #     "REASONING": False,
+    #     "MULTIMODAL": False
+    # }
 }
 
 
 def get_model_config(model_name: str) -> dict:
     config = models_config.get(model_name)
+    config['MODEL_KEY'] = model_name
+
     if not config:
         raise ValueError(f"Model {model_name} not found in models_config")
     if not config.get("MODEL_NAME"):
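One caveat with the new config['MODEL_KEY'] = model_name line: it runs before the `if not config` guard, so an unknown model name raises a TypeError on the None subscript instead of the intended ValueError. A minimal reordered sketch of the lookup, with hypothetical environment values standing in for the Space's secrets, purely for local testing:

    import os

    # Hypothetical values; the real Space sets these as secrets/variables.
    os.environ.setdefault("MODEL_NAME_APRIEL_1_5_15B", "apriel-1.5-15b-thinker")
    os.environ.setdefault("VLLM_API_URL_LIST_APRIEL_1_5_15B",
                          "http://host-a:8000/v1,http://host-b:8000/v1")

    models_config = {
        "Apriel-1.5-15B-thinker": {
            "MODEL_NAME": os.environ.get("MODEL_NAME_APRIEL_1_5_15B"),
            "VLLM_API_URL_LIST": os.environ.get("VLLM_API_URL_LIST_APRIEL_1_5_15B"),
        },
    }


    def get_model_config(model_name: str) -> dict:
        config = models_config.get(model_name)
        if not config:                        # guard first; config may be None
            raise ValueError(f"Model {model_name} not found in models_config")
        config["MODEL_KEY"] = model_name      # safe to tag after the guard
        return config


    print(get_model_config("Apriel-1.5-15B-thinker")["MODEL_KEY"])

Keeping the guard first preserves the intended error message while still letting setup_model() read back MODEL_KEY when it reinitializes the client per request.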
|