Spaces:
Running
Running
Use Llama chat template
Browse files — gen_api_answer.py (+1, −3)
gen_api_answer.py
CHANGED
|
@@ -15,14 +15,12 @@ from prompts import (
|
|
| 15 |
FLOW_JUDGE_PROMPT
|
| 16 |
)
|
| 17 |
from transformers import AutoTokenizer
|
| 18 |
-
from huggingface_hub import login
|
| 19 |
|
| 20 |
# Initialize clients
|
| 21 |
anthropic_client = anthropic.Anthropic()
|
| 22 |
openai_client = OpenAI()
|
| 23 |
together_client = Together()
|
| 24 |
hf_api_key = os.getenv("HF_API_KEY")
|
| 25 |
-
login(hf_api_key)
|
| 26 |
flow_judge_api_key = os.getenv("FLOW_JUDGE_API_KEY")
|
| 27 |
cohere_client = cohere.ClientV2(os.getenv("CO_API_KEY"))
|
| 28 |
|
|
@@ -127,7 +125,7 @@ def get_atla_response(model_name, prompt, system_prompt=None, max_tokens=500, te
|
|
| 127 |
messages.append({"role": "user", "content": prompt})
|
| 128 |
|
| 129 |
# Apply chat template
|
| 130 |
-
model_id = "
|
| 131 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 132 |
formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 133 |
|
|
|
|
| 15 |
FLOW_JUDGE_PROMPT
|
| 16 |
)
|
| 17 |
from transformers import AutoTokenizer
|
|
|
|
| 18 |
|
| 19 |
# Initialize clients
|
| 20 |
anthropic_client = anthropic.Anthropic()
|
| 21 |
openai_client = OpenAI()
|
| 22 |
together_client = Together()
|
| 23 |
hf_api_key = os.getenv("HF_API_KEY")
|
|
|
|
| 24 |
flow_judge_api_key = os.getenv("FLOW_JUDGE_API_KEY")
|
| 25 |
cohere_client = cohere.ClientV2(os.getenv("CO_API_KEY"))
|
| 26 |
|
|
|
|
| 125 |
messages.append({"role": "user", "content": prompt})
|
| 126 |
|
| 127 |
# Apply chat template
|
| 128 |
+
model_id = "meta-llama/Llama-3.1-8B"
|
| 129 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 130 |
formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 131 |
|