Spaces:
Running
Running
Add Selene-1-Mini endpoint
Browse files — gen_api_answer.py (+2 −2)
gen_api_answer.py
CHANGED
|
@@ -125,7 +125,7 @@ def get_atla_response(model_name, prompt, system_prompt=None, max_tokens=500, te
|
|
| 125 |
messages.append({"role": "user", "content": prompt})
|
| 126 |
|
| 127 |
# Apply chat template
|
| 128 |
-
model_id = "AtlaAI/… [old value truncated in this page capture]
|
| 129 |
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
|
| 130 |
formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 131 |
|
|
@@ -141,7 +141,7 @@ def get_atla_response(model_name, prompt, system_prompt=None, max_tokens=500, te
|
|
| 141 |
}
|
| 142 |
|
| 143 |
response = requests.post(
|
| 144 |
-
"https://… [old endpoint URL truncated in this page capture]
|
| 145 |
headers=headers,
|
| 146 |
json=payload
|
| 147 |
)
|
|
|
|
| 125 |
messages.append({"role": "user", "content": prompt})
|
| 126 |
|
| 127 |
# Apply chat template
|
| 128 |
+
model_id = "AtlaAI/Selene-1-Mini-Llama-3.1-8B"
|
| 129 |
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_api_key)
|
| 130 |
formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 131 |
|
|
|
|
| 141 |
}
|
| 142 |
|
| 143 |
response = requests.post(
|
| 144 |
+
"https://bkp9p28gri93egqh.us-east-1.aws.endpoints.huggingface.cloud",
|
| 145 |
headers=headers,
|
| 146 |
json=payload
|
| 147 |
)
|