Update api_usage.py
api_usage.py  +3 -36
api_usage.py
CHANGED
@@ -378,36 +378,6 @@ def get_azure_deploy(endpoint, api_key):
         return deployments
     except:
         return None
-
-def check_gpt4turbo(endpoint, api_key, deploy_id):
-    try:
-        if endpoint.startswith('http'):
-            url = f'{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2024-02-01'
-        else:
-            url = f'https://{endpoint}/openai/deployments/{deploy_id}/chat/completions?api-version=2024-02-01'
-
-        headers = {
-            'Content-Type': 'application/json',
-            'api-key': api_key,
-            'User-Agent': 'OpenAI/v1 PythonBindings/0.28.1',
-        }
-
-        data = {
-            "max_tokens": 9000,
-            "messages": [{ "role": "user", "content": "" }]
-        }
-
-        try:
-            rq = requests.post(url=url, headers=headers, json=data)
-            result = rq.json()
-            if result["error"]["code"] == "context_length_exceeded":
-                return False
-            else:
-                return True
-        except Exception as e:
-            return True
-    except Exception as e:
-        return False
 
 def get_azure_status(endpoint, api_key, deployments_list):
     # moderation check
@@ -421,7 +391,7 @@ def get_azure_status(endpoint, api_key, deployments_list):
 
     has_32k = False
     has_gpt4 = False
-    has_gpt4turbo = False
+    #has_gpt4turbo = False
     has_turbo = False
     list_model = {}
     for model, deploy in azure_deploy.items():
@@ -436,11 +406,8 @@ def get_azure_status(endpoint, api_key, deployments_list):
             has_turbo = True
 
     if not list_model: #has_32k == False and has_gpt4 == False and has_turbo == False:
-        return "No GPT deployment to check", has_32k,
+        return "No GPT deployment to check", has_32k, has_gpt4, has_turbo
    else:
-        if has_gpt4:
-            has_gpt4turbo = check_gpt4turbo(endpoint, api_key, list_model['gpt-4'])
-
         pozz_res = {}
 
         for model, deployment in list_model.items():
@@ -470,7 +437,7 @@ def get_azure_status(endpoint, api_key, deployments_list):
 
         except Exception as e:
             pozz_res[model] = e
-        return pozz_res, has_32k,
+        return pozz_res, has_32k, has_gpt4, has_turbo
 
 def check_key_mistral_availability(key):
     try:
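With this change, get_azure_status no longer probes for a GPT-4 Turbo deployment and returns a four-item tuple: the per-deployment results (or the plain string "No GPT deployment to check") plus the has_32k, has_gpt4, and has_turbo flags. The call sites are not part of this diff, so the following is only a minimal usage sketch under that assumption; the endpoint, key, and import line are placeholders for illustration.

# Hypothetical caller, not part of this commit: unpacks the new
# (results, has_32k, has_gpt4, has_turbo) return value of get_azure_status.
from api_usage import get_azure_deploy, get_azure_status  # assumes api_usage.py is importable

endpoint = "example-resource.openai.azure.com"  # placeholder Azure OpenAI endpoint
api_key = "azure-api-key-placeholder"           # placeholder key

deployments = get_azure_deploy(endpoint, api_key)  # returns None on failure (lines 378-380 above)
if deployments is not None:
    result, has_32k, has_gpt4, has_turbo = get_azure_status(endpoint, api_key, deployments)
    if isinstance(result, str):
        print(result)  # "No GPT deployment to check"
    else:
        for model, status in result.items():  # status is the check result or the caught exception
            print(model, status)
    print(f"32k: {has_32k} | gpt-4: {has_gpt4} | turbo: {has_turbo}")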