Update app.py
app.py (CHANGED)
```diff
@@ -12,7 +12,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 # ----------------------------
 
 DEFAULT_MODELS = [
-    "google/gemma-2-2b-it",
     "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
     "Qwen/Qwen2.5-1.5B-Instruct",
 ]
@@ -125,7 +124,7 @@ def generate_batch_df(
 
 def write_csv_path(df: pd.DataFrame) -> str:
     ts = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
-    tmp = tempfile.NamedTemporaryFile(prefix=f"
+    tmp = tempfile.NamedTemporaryFile(prefix=f"Output_{ts}_", suffix=".csv", delete=False, dir="/tmp")
     df.to_csv(tmp.name, index=False)
     return tmp.name
 
@@ -137,7 +136,7 @@ def write_csv_path(df: pd.DataFrame) -> str:
 with gr.Blocks(title="Multi-Prompt Chat (System Prompt Control)") as demo:
     gr.Markdown(
         """
-        #
+        # Multi-Prompt Chat to test system prompt effects
         Pick a small free model, set a **system prompt**, and enter **multiple user prompts** (one per line).
         Click **Generate** to get batched responses and a **downloadable CSV**.
         """
@@ -149,11 +148,11 @@ with gr.Blocks(title="Multi-Prompt Chat (System Prompt Control)") as demo:
         choices=DEFAULT_MODELS,
         value=DEFAULT_MODELS[0],
         label="Model",
-        info="Free, small instruction-tuned models that run on CPU
+        info="Free, small instruction-tuned models that run on CPU and free HF Space",
     )
     system_prompt = gr.Textbox(
         label="System prompt",
-        placeholder="e.g., You are an ecolinguistics-aware assistant that
+        placeholder="e.g., You are an ecolinguistics-aware assistant that always prioritise planetary well-being over anthropocentrism.",
         lines=5,
     )
     prompts_multiline = gr.Textbox(
@@ -183,7 +182,7 @@ with gr.Blocks(title="Multi-Prompt Chat (System Prompt Control)") as demo:
     )
 
     # IMPORTANT: type="filepath" so we can return a string path
-    csv_out = gr.File(label="
+    csv_out = gr.File(label="CSV output", interactive=False, type="filepath")
 
     # -------- Callback: generate table AND CSV path in one go --------
```
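For context, the updated `write_csv_path` reassembles into a self-contained helper. The sketch below is grounded in the hunk above; the imports and the usage lines at the bottom (including the column names) are assumptions, since the diff shows only the function itself.

```python
import tempfile
from datetime import datetime

import pandas as pd


def write_csv_path(df: pd.DataFrame) -> str:
    # Timestamped prefix so repeated runs don't collide on the same filename.
    ts = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
    # delete=False keeps the file on disk after the handle is closed, so it
    # can still be served for download; /tmp is writable on a free Space.
    tmp = tempfile.NamedTemporaryFile(
        prefix=f"Output_{ts}_", suffix=".csv", delete=False, dir="/tmp"
    )
    df.to_csv(tmp.name, index=False)
    return tmp.name


# Hypothetical usage: column names are illustrative, not taken from the diff.
df = pd.DataFrame({"prompt": ["Hello"], "response": ["Hi there!"]})
print(write_csv_path(df))  # e.g. /tmp/Output_20250101-120000_k3j2l9ab.csv
```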
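The `# IMPORTANT` comment refers to how the CSV reaches the download widget: with `type="filepath"`, a `gr.File` output accepts a plain string path from the callback, so a single click handler can return the results table and the CSV path together. Below is a minimal sketch of that wiring; the component names other than `csv_out` and the stand-in generator are assumptions (the app's real `generate_batch_df` is not shown in this diff).

```python
import tempfile
from datetime import datetime

import gradio as gr
import pandas as pd


def write_csv_path(df: pd.DataFrame) -> str:
    # The helper from the hunk above.
    ts = datetime.utcnow().strftime("%Y%m%d-%H%M%S")
    tmp = tempfile.NamedTemporaryFile(
        prefix=f"Output_{ts}_", suffix=".csv", delete=False, dir="/tmp"
    )
    df.to_csv(tmp.name, index=False)
    return tmp.name


with gr.Blocks() as demo:
    prompts_multiline = gr.Textbox(label="User prompts (one per line)", lines=5)
    table = gr.Dataframe(label="Responses")
    # type="filepath" lets the callback hand back a plain string path,
    # which Gradio then exposes as a downloadable file.
    csv_out = gr.File(label="CSV output", interactive=False, type="filepath")
    generate = gr.Button("Generate")

    def run(prompts: str):
        # Stand-in for the app's real batched generation.
        rows = [
            {"prompt": p, "response": f"(model reply to: {p})"}
            for p in prompts.splitlines()
            if p.strip()
        ]
        df = pd.DataFrame(rows)
        return df, write_csv_path(df)  # table AND CSV path in one go

    generate.click(run, inputs=prompts_multiline, outputs=[table, csv_out])

demo.launch()
```

Returning `(df, path)` from one handler keeps the table and the download in sync: both outputs always reflect the same generation run.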