Commit a9aff6a
1 Parent(s): 2e4d2e9
Update api_usage.py
api_usage.py: +16 -7
api_usage.py CHANGED

@@ -7,9 +7,10 @@ import openai
 queryUrl = 'https://api.openai.com/v1/chat/completions'
 GPT_TYPES = ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"]
 rate_limit_per_model = {
-    "gpt-3.5-turbo": …
+    "gpt-3.5-turbo-new": 2000,
+    "gpt-3.5-turbo-old": 3500,
     "gpt-4": 200,
-    "gpt-4-32k": …
+    "gpt-4-32k": 1000 # No actual clue, rare enough
 }
 body_turbo = {"model": "gpt-3.5-turbo", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
 body_gpt4 = {"model": "gpt-4", "max_tokens": 1, "messages": [{'role':'user', 'content': ''}]}
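The new table distinguishes a 2000 RPM limit ("gpt-3.5-turbo-new") from the older 3500 RPM limit ("gpt-3.5-turbo-old"); check_key_type in the second hunk below compares a key's reported requests-per-minute limit against these values. A minimal sketch of how such an rpm figure could be obtained, assuming the surrounding script reads OpenAI's x-ratelimit-limit-requests response header from a raw chat-completions request; the get_rpm helper and the use of the requests library are illustrative and not part of this commit:

import requests

def get_rpm(key, body=body_turbo):
    # Illustrative helper (not in this commit): send a minimal 1-token request
    # and read the requests-per-minute limit reported for the key.
    headers = {"Authorization": f"Bearer {key}"}
    r = requests.post(queryUrl, headers=headers, json=body)
    return int(r.headers.get("x-ratelimit-limit-requests", 0))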
@@ -60,12 +61,20 @@ def get_subscription(key):
     # return ""

 def check_key_type(model, rpm):
-    if …
-    …
-    …
-    …
+    if model == "gpt-3.5-turbo":
+        if rpm > rate_limit_per_model['gpt-3.5-turbo-old']:
+            return "yes | pay, possibly big key"
+        elif rpm == rate_limit_per_model['gpt-3.5-turbo-new'] or rpm == rate_limit_per_model['gpt-3.5-turbo-old']:
+            return "yes | pay"
+        else:
+            return "yes | trial"
     else:
-        …
+        if rpm < rate_limit_per_model[model]:
+            return "yes | trial"
+        elif rpm == rate_limit_per_model[model]:
+            return "yes | pay"
+        else:
+            return "yes | pay, possibly big key"

 def check_gpt4_availability():
     if check_key_availability():
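Under the new logic, a gpt-3.5-turbo key whose limit matches either paid tier (2000 or 3500 RPM) is reported as paid, anything above 3500 is flagged as a possibly larger key, and everything else is treated as trial; for other models the same comparison is made against the single table value. Illustrative calls, assuming check_key_type is imported from api_usage.py:

print(check_key_type("gpt-3.5-turbo", 2000))  # yes | pay (new paid tier)
print(check_key_type("gpt-3.5-turbo", 3500))  # yes | pay (old paid tier)
print(check_key_type("gpt-3.5-turbo", 4000))  # yes | pay, possibly big key
print(check_key_type("gpt-3.5-turbo", 200))   # yes | trial
print(check_key_type("gpt-4", 200))           # yes | pay
print(check_key_type("gpt-4", 40))            # yes | trial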