Commit c8d0523
1 Parent(s): 0f10e18
Update api_usage.py

api_usage.py CHANGED (+7 -4)
@@ -50,7 +50,7 @@ def get_headers(key, org_id:str = None):
         headers["OpenAI-Organization"] = org_id
     return headers
 
-def get_subscription(key):
+def get_subscription(key, org_list):
     has_gpt4 = False
     has_gpt4_32k = False
     default_org = ""

@@ -62,7 +62,7 @@ def get_subscription(key):
     list_models = []
     list_models_avai = set()
 
-    org_list = get_orgs(key)
+    #org_list = get_orgs(key)
 
     for org_in in org_list:
         available_models = get_models(key, org_in['id'])

@@ -125,8 +125,11 @@ def format_status(list_models_avai, headers):
         rpm_num = int(r.headers.get("x-ratelimit-limit-requests", 0))
         tpm_num = int(r.headers.get("x-ratelimit-limit-tokens_usage_based", 0))
         tpm_left = int(r.headers.get("x-ratelimit-remaining-tokens_usage_based", 0))
-        [removed line: content not shown in this view]
-        [removed line: content not shown in this view]
+        _rpm = '{:,}'.format(rpm_num).replace(',', ' ')
+        _tpm = '{:,}'.format(tpm_num).replace(',', ' ')
+        _tpm_left = '{:,}'.format(tpm_left).replace(',', ' ')
+        rpm.append(f"{_rpm} ({model})")
+        tpm.append(f"{_tpm} ({_tpm_left} left, {model})")
         if model == GPT_TYPES[0]:
             quota = check_key_tier(tpm_num, TOKEN_LIMIT_PER_TIER_TURBO, headers)
         #if model == GPT_TYPES[1]:
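The net effect of the first two hunks is that get_subscription no longer looks up the organization list itself (the old org_list = get_orgs(key) call is now commented out); the caller is expected to fetch the list and pass it in through the new org_list parameter. A minimal caller sketch under that assumption, with check_key as a hypothetical wrapper name and get_orgs(key) assumed to still return a list of dicts carrying an 'id' field, as the org_in['id'] lookup implies:

# Hypothetical caller: fetch the org list once, then hand it to get_subscription.
def check_key(key):
    org_list = get_orgs(key)                 # previously done inside get_subscription
    return get_subscription(key, org_list)   # new two-argument signature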
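The new lines in format_status only change presentation: each rate-limit number is rendered with spaces as thousands separators before being appended to the rpm and tpm lists. A standalone sketch of that formatting idiom, using made-up values:

# '{:,}' inserts commas as thousands separators; replace() swaps them for spaces.
tpm_num = 2000000
tpm_left = 1999500
_tpm = '{:,}'.format(tpm_num).replace(',', ' ')        # "2 000 000"
_tpm_left = '{:,}'.format(tpm_left).replace(',', ' ')  # "1 999 500"
print(f"{_tpm} ({_tpm_left} left, gpt-3.5-turbo)")     # placeholder model name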