Spaces:
Running
Running
Update variables.py
Browse files — variables.py (+17 −16)
variables.py
CHANGED
|
@@ -1,15 +1,21 @@
|
|
| 1 |
import json
|
| 2 |
import os
|
| 3 |
|
| 4 |
-
# Load
|
| 5 |
-
|
| 6 |
-
prompt_data = json.loads(json_data)
|
| 7 |
|
| 8 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9 |
metaprompt_explanations = {
|
| 10 |
-
key: data
|
| 11 |
for key, data in prompt_data.items()
|
| 12 |
-
}
|
| 13 |
|
| 14 |
# Generate markdown explanation
|
| 15 |
explanation_markdown = "".join([
|
|
@@ -19,7 +25,6 @@ explanation_markdown = "".join([
|
|
| 19 |
|
| 20 |
# Define models list
|
| 21 |
models = [
|
| 22 |
-
# Meta-Llama models (all support system)
|
| 23 |
"meta-llama/Meta-Llama-3-70B-Instruct",
|
| 24 |
"meta-llama/Meta-Llama-3-8B-Instruct",
|
| 25 |
"meta-llama/Llama-3.1-70B-Instruct",
|
|
@@ -28,25 +33,21 @@ models = [
|
|
| 28 |
"meta-llama/Llama-3.2-1B-Instruct",
|
| 29 |
"meta-llama/Llama-2-13b-chat-hf",
|
| 30 |
"meta-llama/Llama-2-7b-chat-hf",
|
| 31 |
-
|
| 32 |
-
# HuggingFaceH4 models (support system)
|
| 33 |
"HuggingFaceH4/zephyr-7b-beta",
|
| 34 |
"HuggingFaceH4/zephyr-7b-alpha",
|
| 35 |
-
|
| 36 |
-
# Qwen models (support system)
|
| 37 |
"Qwen/Qwen2.5-72B-Instruct",
|
| 38 |
"Qwen/Qwen2.5-1.5B",
|
| 39 |
-
|
| 40 |
"microsoft/Phi-3.5-mini-instruct"
|
| 41 |
]
|
| 42 |
|
| 43 |
-
#
|
| 44 |
api_token = os.getenv('HF_API_TOKEN')
|
| 45 |
if not api_token:
|
| 46 |
raise ValueError("HF_API_TOKEN not found in environment variables")
|
| 47 |
|
| 48 |
-
#
|
| 49 |
meta_prompts = {
|
| 50 |
-
key: data
|
| 51 |
for key, data in prompt_data.items()
|
| 52 |
-
}
|
|
|
|
|
|
| 1 |
import json
|
| 2 |
import os
|
| 3 |
|
| 4 |
+
# Prompt templates arrive serialized as JSON in the PROMPT_TEMPLATES
# environment variable; default to an empty JSON object when unset.
templates_json = os.environ.get('PROMPT_TEMPLATES', '{}')

# Best-effort parse: malformed JSON degrades to an empty mapping instead
# of aborting module import.
try:
    prompt_data = json.loads(templates_json)
except json.JSONDecodeError:
    prompt_data = {}
|
| 13 |
+
|
| 14 |
+
# Map each template key to its human-readable description, tolerating
# entries that lack a "description" field.
# The isinstance guard replaces the previous truthiness check
# (`if prompt_data else {}`): that only covered falsy decodes such as
# JSON `null` or `{}`, and would crash on a non-empty non-dict value
# (e.g. PROMPT_TEMPLATES set to a JSON array).  For dict inputs the
# result is unchanged — an empty dict comprehension already yields {}.
metaprompt_explanations = {
    key: data.get("description", "No description available")
    for key, data in prompt_data.items()
} if isinstance(prompt_data, dict) else {}
|
| 19 |
|
| 20 |
# Generate markdown explanation
|
| 21 |
explanation_markdown = "".join([
|
|
|
|
| 25 |
|
| 26 |
# Define models list
|
| 27 |
models = [
|
|
|
|
| 28 |
"meta-llama/Meta-Llama-3-70B-Instruct",
|
| 29 |
"meta-llama/Meta-Llama-3-8B-Instruct",
|
| 30 |
"meta-llama/Llama-3.1-70B-Instruct",
|
|
|
|
| 33 |
"meta-llama/Llama-3.2-1B-Instruct",
|
| 34 |
"meta-llama/Llama-2-13b-chat-hf",
|
| 35 |
"meta-llama/Llama-2-7b-chat-hf",
|
|
|
|
|
|
|
| 36 |
"HuggingFaceH4/zephyr-7b-beta",
|
| 37 |
"HuggingFaceH4/zephyr-7b-alpha",
|
|
|
|
|
|
|
| 38 |
"Qwen/Qwen2.5-72B-Instruct",
|
| 39 |
"Qwen/Qwen2.5-1.5B",
|
|
|
|
| 40 |
"microsoft/Phi-3.5-mini-instruct"
|
| 41 |
]
|
| 42 |
|
| 43 |
+
# The Hugging Face API token must come from the environment; fail fast
# at import time when it is missing or empty.
api_token = os.environ.get('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN not found in environment variables")
|
| 47 |
|
| 48 |
+
# Map each template key to its prompt text, tolerating entries that lack
# a "template" field.
# The isinstance guard replaces the previous truthiness check
# (`if prompt_data else {}`): that only covered falsy decodes such as
# JSON `null` or `{}`, and would crash on a non-empty non-dict value
# (e.g. PROMPT_TEMPLATES set to a JSON array).  For dict inputs the
# result is unchanged — an empty dict comprehension already yields {}.
meta_prompts = {
    key: data.get("template", "No template available")
    for key, data in prompt_data.items()
} if isinstance(prompt_data, dict) else {}
|
| 53 |
+
|