Update proxy_server.py
proxy_server.py (+22 -45)
@@ -13,6 +13,27 @@ sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path - for litellm local dev
 
+sample = """
+from openai import OpenAI
+import json
+
+base_url = "https://ka1kuk-litellm.hf.space"
+api_key = "hf_xxxx"
+
+client = OpenAI(base_url=base_url, api_key=api_key)
+
+messages = [{"role": "user", "content": "What's the capital of France?"}]
+
+response = client.chat.completions.create(
+    model="huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1",
+    response_format={"type": "json_object"},
+    messages=messages,
+    stream=False,
+)
+
+print(response.choices[0].message.content)
+"""
+
 try:
     import fastapi
     import backoff
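The embedded sample asks for JSON output via response_format but never parses the reply, even though it imports json. A minimal sketch of that missing step, assuming the Space URL from the sample is reachable and hf_xxxx is swapped for a real key:

import json

from openai import OpenAI

# Endpoint and key are the placeholders from the committed sample.
client = OpenAI(base_url="https://ka1kuk-litellm.hf.space", api_key="hf_xxxx")

response = client.chat.completions.create(
    model="huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1",
    response_format={"type": "json_object"},
    # JSON mode generally behaves best when the prompt mentions JSON explicitly.
    messages=[{"role": "user", "content": "What's the capital of France? Reply in JSON."}],
)

# The model returns a JSON string in message.content; json.loads yields a dict.
print(json.loads(response.choices[0].message.content))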
@@ -22,48 +43,6 @@ try:
 except ImportError as e:
     raise ImportError(f"Missing dependency {e}. Run `pip install 'litellm[proxy]'`")
 
-import random
-
-list_of_messages = [
-    "'The thing I wish you improved is...'",
-    "'A feature I really want is...'",
-    "'The worst thing about this product is...'",
-    "'This product would be better if...'",
-    "'I don't like how this works...'",
-    "'It would help me if you could add...'",
-    "'This feature doesn't meet my needs because...'",
-    "'I get frustrated when the product...'",
-]
-
-
-def generate_feedback_box():
-    box_width = 60
-
-    # Select a random message
-    message = random.choice(list_of_messages)
-
-    print()  # noqa
-    print("\033[1;37m" + "#" + "-" * box_width + "#\033[0m")  # noqa
-    print("\033[1;37m" + "#" + " " * box_width + "#\033[0m")  # noqa
-    print("\033[1;37m" + "# {:^59} #\033[0m".format(message))  # noqa
-    print(  # noqa
-        "\033[1;37m"
-        + "# {:^59} #\033[0m".format("https://github.com/BerriAI/litellm/issues/new")
-    )  # noqa
-    print("\033[1;37m" + "#" + " " * box_width + "#\033[0m")  # noqa
-    print("\033[1;37m" + "#" + "-" * box_width + "#\033[0m")  # noqa
-    print()  # noqa
-    print(" Thank you for using LiteLLM! - Krrish & Ishaan")  # noqa
-    print()  # noqa
-    print()  # noqa
-    print()  # noqa
-    print(  # noqa
-        "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m"
-    )  # noqa
-    print()  # noqa
-    print()  # noqa
-
-
 import litellm
 from litellm.proxy.utils import (
     PrismaClient,
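For reference, the removed helper leaned on Python's format-spec centering for its box drawing. A stripped-down, self-consistent sketch of the same pattern (the message text is illustrative; the 61-dash border matches the 63-character message line):

def boxed(msg: str) -> None:
    # {:^59} centers msg in a 59-character field; with the "# " / " #" framing
    # each message line is 63 characters wide, so the border uses 61 dashes.
    print("#" + "-" * 61 + "#")
    print("# {:^59} #".format(msg))
    print("#" + "-" * 61 + "#")

boxed("Thank you for using LiteLLM! - Krrish & Ishaan")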
@@ -104,8 +83,7 @@ from typing import Union
 app = FastAPI(
     docs_url="/",
     title="LiteLLM API",
-    description="Proxy Server to call 100+ LLMs in the OpenAI format\n\
-)
+    description="Proxy Server to call 100+ LLMs in the OpenAI format\n\nSample with the openai library:\n\n" + sample)
 router = APIRouter()
 origins = ["*"]
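Concatenating sample into the description surfaces the usage snippet on the interactive docs page, which this proxy serves at the root path (docs_url="/"). A minimal self-contained sketch of the pattern, with a shortened illustrative sample:

from fastapi import FastAPI

sample = """
from openai import OpenAI
client = OpenAI(base_url="https://ka1kuk-litellm.hf.space", api_key="hf_xxxx")
"""

# FastAPI renders the description string at the top of the Swagger UI,
# so anyone opening "/" sees the copy-pasteable client snippet.
app = FastAPI(
    docs_url="/",
    title="LiteLLM API",
    description="Proxy Server to call 100+ LLMs in the OpenAI format\n\nSample with the openai library:\n\n" + sample,
)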
@@ -1044,7 +1022,6 @@ async def initialize(
     config=None,
 ):
     global user_model, user_api_base, user_debug, user_detailed_debug, user_user_max_tokens, user_request_timeout, user_temperature, user_telemetry, user_headers, experimental, llm_model_list, llm_router, general_settings, master_key, user_custom_auth, prisma_client
-    generate_feedback_box()
     user_model = model
     user_debug = debug
     if debug == True:  # this needs to be first, so users can see Router init debugg
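The change leaves the OpenAI-compatible surface untouched, so a quick smoke test after deploying is to list the proxy's models through the openai client. A sketch assuming a proxy running locally on port 8000 (both values below are placeholders):

from openai import OpenAI

# Any reachable litellm proxy works here; URL and key are placeholders.
client = OpenAI(base_url="http://localhost:8000", api_key="sk-1234")

# The proxy exposes the OpenAI-style /models route, so no provider-specific
# code is needed to verify it is up and serving the configured model list.
for model in client.models.list():
    print(model.id)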