Spaces:
Running
Running
:recycle: [Refactor] Rename message_streamer to huggingface_streamer
Browse files
apis/chat_api.py
CHANGED
|
@@ -19,9 +19,8 @@ from constants.envs import CONFIG
|
|
| 19 |
|
| 20 |
from messagers.message_composer import MessageComposer
|
| 21 |
from mocks.stream_chat_mocker import stream_chat_mock
|
| 22 |
-
from networks.message_streamer import MessageStreamer
|
| 23 |
-
from
|
| 24 |
-
from constants.models import AVAILABLE_MODELS_DICTS
|
| 25 |
|
| 26 |
|
| 27 |
class ChatAPIApp:
|
|
@@ -90,19 +89,8 @@ class ChatAPIApp:
|
|
| 90 |
def chat_completions(
|
| 91 |
self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
|
| 92 |
):
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
composer.merge(messages=item.messages)
|
| 96 |
-
# streamer.chat = stream_chat_mock
|
| 97 |
-
|
| 98 |
-
stream_response = streamer.chat_response(
|
| 99 |
-
prompt=composer.merged_str,
|
| 100 |
-
temperature=item.temperature,
|
| 101 |
-
top_p=item.top_p,
|
| 102 |
-
max_new_tokens=item.max_tokens,
|
| 103 |
-
api_key=api_key,
|
| 104 |
-
use_cache=item.use_cache,
|
| 105 |
-
)
|
| 106 |
if item.stream:
|
| 107 |
event_source_response = EventSourceResponse(
|
| 108 |
streamer.chat_return_generator(stream_response),
|
|
|
|
| 19 |
|
| 20 |
from messagers.message_composer import MessageComposer
|
| 21 |
from mocks.stream_chat_mocker import stream_chat_mock
|
| 22 |
+
from networks.huggingface_streamer import HuggingfaceStreamer
|
| 23 |
+
from networks.openai_streamer import OpenaiStreamer
|
|
|
|
| 24 |
|
| 25 |
|
| 26 |
class ChatAPIApp:
|
|
|
|
| 89 |
def chat_completions(
|
| 90 |
self, item: ChatCompletionsPostItem, api_key: str = Depends(extract_api_key)
|
| 91 |
):
|
| 92 |
+
streamer = HuggingfaceStreamer(model=item.model)
|
| 93 |
+
composer = MessageComposer(model=item.model)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
if item.stream:
|
| 95 |
event_source_response = EventSourceResponse(
|
| 96 |
streamer.chat_return_generator(stream_response),
|
networks/{message_streamer.py → huggingface_streamer.py}
RENAMED
|
@@ -2,8 +2,8 @@ import json
|
|
| 2 |
import re
|
| 3 |
import requests
|
| 4 |
|
|
|
|
| 5 |
from tclogger import logger
|
| 6 |
-
from tiktoken import get_encoding as tiktoken_get_encoding
|
| 7 |
from transformers import AutoTokenizer
|
| 8 |
|
| 9 |
from constants.models import (
|
|
@@ -12,11 +12,11 @@ from constants.models import (
|
|
| 12 |
TOKEN_LIMIT_MAP,
|
| 13 |
TOKEN_RESERVED,
|
| 14 |
)
|
| 15 |
-
from constants.
|
| 16 |
from messagers.message_outputer import OpenaiStreamOutputer
|
| 17 |
|
| 18 |
|
| 19 |
-
class MessageStreamer:
|
| 20 |
def __init__(self, model: str):
|
| 21 |
if model in MODEL_MAP.keys():
|
| 22 |
self.model = model
|
|
|
|
| 2 |
import re
|
| 3 |
import requests
|
| 4 |
|
| 5 |
+
|
| 6 |
from tclogger import logger
|
|
|
|
| 7 |
from transformers import AutoTokenizer
|
| 8 |
|
| 9 |
from constants.models import (
|
|
|
|
| 12 |
TOKEN_LIMIT_MAP,
|
| 13 |
TOKEN_RESERVED,
|
| 14 |
)
|
| 15 |
+
from constants.envs import PROXIES
|
| 16 |
from messagers.message_outputer import OpenaiStreamOutputer
|
| 17 |
|
| 18 |
|
| 19 |
+
class HuggingfaceStreamer:
|
| 20 |
def __init__(self, model: str):
|
| 21 |
if model in MODEL_MAP.keys():
|
| 22 |
self.model = model
|