Commit 299e22b · Parent(s): e6d141a
Refactor code for improved UI layout and readability
app.py CHANGED
@@ -12,6 +12,29 @@ import os
 
 # Ensure you're logged in to Hugging Face
 login(get_token())
+# Define available models
+MODELS = [
+    "meta-llama/Meta-Llama-3-70B-Instruct",
+    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+]
+
+CHOSEN_MODEL = None
+
+
+def get_random_model():
+    global CHOSEN_MODEL
+    model = random.choice(MODELS)
+    CHOSEN_MODEL = model
+    return model
+
+
+def create_client(model_id):
+    return OpenAI(
+        base_url=f"https://api-inference.huggingface.co/models/{model_id}/v1",
+        api_key=get_token(),
+    )
+
 
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct/v1",
@@ -33,6 +56,8 @@ def get_and_store_prompt():
 
 
 def generate_blurb(prompt):
+    model_id = get_random_model()
+    client = create_client(model_id)
     max_tokens = random.randint(100, 1000)
     chat_completion = client.chat.completions.create(
         model="tgi",
@@ -57,6 +82,7 @@ def log_blurb_and_vote(prompt, blurb, vote, user_info: gr.OAuthProfile | None, *
         "blurb": blurb,
         "vote": vote,
         "user_id": user_id,
+        "model": CHOSEN_MODEL,
     }
     with open("blurb_log.jsonl", "a") as f:
         f.write(json.dumps(log_entry) + "\n")
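Taken together, the new helpers route each request to a randomly chosen model: get_random_model() picks from MODELS (and records the pick in the CHOSEN_MODEL global), and create_client() builds an OpenAI-compatible client pointed at that model's Inference API endpoint. Note that model="tgi" in generate_blurb stays untouched: with a per-model base_url the endpoint already determines which model serves the request, so the model field is effectively a placeholder. A minimal smoke test of the routing, assuming the helpers above are importable from app.py (the prompt text here is illustrative, not the Space's actual prompt):

    # Illustrative check of the per-request routing; run in the Space's environment.
    model_id = get_random_model()     # also records the pick in CHOSEN_MODEL
    client = create_client(model_id)  # client aimed at that model's endpoint
    resp = client.chat.completions.create(
        model="tgi",  # placeholder: the base_url already pins the model
        messages=[{"role": "user", "content": "Write a one-line book blurb."}],
        max_tokens=64,
    )
    print(model_id, "->", resp.choices[0].message.content)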
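Because each log entry now records which model produced the blurb, votes can be tallied per model offline. A short sketch, assuming the blurb_log.jsonl format written by log_blurb_and_vote above (the exact values stored in "vote" are not shown in this diff):

    import json
    from collections import Counter

    # Tally (model, vote) pairs from the JSONL log written by log_blurb_and_vote.
    votes = Counter()
    with open("blurb_log.jsonl") as f:
        for line in f:
            entry = json.loads(line)
            votes[(entry["model"], entry["vote"])] += 1

    for (model, vote), count in votes.most_common():
        print(f"{model}: {vote} x {count}")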