Update app.py
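This update adds a task selector and a dynamically populated model dropdown to the chatbot UI, and threads the selected model through to the chat completion call.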
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
+from huggingface_hub import InferenceClient, list_models
 import os
 import json
 
@@ -20,7 +20,7 @@ def save_prompts():
     with open(PROMPTS_FILE, "w") as file:
         json.dump(system_prompts, file, indent=4)
 
-def chat_with_model(user_input, system_prompt):
+def chat_with_model(user_input, system_prompt, selected_model):
     """Send user input to the model and return its response."""
     messages = [
         {"role": "system", "content": system_prompt},
@@ -29,7 +29,7 @@ def chat_with_model(user_input, system_prompt):
 
     try:
         result = client.chat.completions.create(
-            model=
+            model=selected_model,
             messages=messages,
             temperature=0.5,
             max_tokens=2048,
@@ -50,32 +50,50 @@ def get_prompt(name):
     """Retrieve a system prompt by name."""
     return system_prompts.get(name, "")
 
+def fetch_models(task):
+    """Fetch models for a specific task from Hugging Face Hub."""
+    try:
+        models = list_models(filter=f"pipeline_tags:{task}")
+        return [model.modelId for model in models]
+    except Exception as e:
+        return [f"Error fetching models: {str(e)}"]
+
 # Gradio Interface
 with gr.Blocks() as demo:
-    gr.Markdown("## Hugging Face Chatbot with
+    gr.Markdown("## Hugging Face Chatbot with Dynamic Model Selection")
 
     with gr.Row():
         with gr.Column():
+            # Task selection
+            task_selector = gr.Dropdown(
+                choices=["text-generation", "image-classification", "text-classification", "translation"],
+                label="Select Task",
+                value="text-generation"
+            )
+
+            # Model selector
+            model_selector = gr.Dropdown(choices=[], label="Select Model")
+
+            # System prompt and input
             system_prompt_name = gr.Dropdown(choices=list(system_prompts.keys()), label="Select System Prompt")
             system_prompt_content = gr.TextArea(label="System Prompt", value=get_prompt("default"), lines=4)
             save_prompt_button = gr.Button("Save System Prompt")
 
             user_input = gr.TextArea(label="Enter your prompt", placeholder="Describe the character or request a detailed description...", lines=4)
             submit_button = gr.Button("Generate")
-
+
         with gr.Column():
             output = gr.TextArea(label="Model Response", interactive=False, lines=10)
 
-
-
+    # Update model list when task changes
+    def update_model_list(task):
+        models = fetch_models(task)
+        return gr.Dropdown.update(choices=models, value=models[0] if models else None)
 
-
-
-        inputs=[system_prompt_name],
-        outputs=[system_prompt_name, system_prompt_content]
-    )
+    # Event bindings
+    task_selector.change(update_model_list, inputs=[task_selector], outputs=[model_selector])
     save_prompt_button.click(update_prompt, inputs=[system_prompt_name, system_prompt_content], outputs=[])
-    submit_button.click(chat_with_model, inputs=[user_input, system_prompt_content], outputs=[output])
+    submit_button.click(chat_with_model, inputs=[user_input, system_prompt_content, model_selector], outputs=[output])
 
 # Run the app
-demo.launch()
+demo.launch()
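A caveat on the new fetch_models helper: f"pipeline_tags:{task}" may not be a filter string the Hub API actually understands, and iterating list_models with no cap can walk an enormous listing. A more defensive variant might look like the sketch below (not part of this commit; it assumes a huggingface_hub version whose list_models exposes task and limit parameters and whose ModelInfo objects carry an id attribute):

from huggingface_hub import list_models

def fetch_models(task, limit=50):
    """Fetch up to `limit` model ids for a task from the Hugging Face Hub."""
    try:
        # task= filters by pipeline tag directly, and limit= keeps the
        # dropdown to a manageable size instead of iterating the full listing.
        models = list_models(task=task, limit=limit)
        return [model.id for model in models]
    except Exception as e:
        return [f"Error fetching models: {e}"]

Note also that client.chat.completions.create only makes sense for conversational text-generation models, so selecting a task such as image-classification will fill the dropdown with models the chat call cannot use.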
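A related portability note, again as a sketch rather than part of the commit: gr.Dropdown.update() is Gradio 3.x API and was removed in Gradio 4, so on newer Gradio the callback would return gr.update(...) (or a fresh gr.Dropdown(...)) instead:

def update_model_list(task):
    models = fetch_models(task)
    # Gradio 4.x removed the per-component .update() classmethods;
    # gr.update(...) works on both 3.x and 4.x for patching a component.
    return gr.update(choices=models, value=models[0] if models else None)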