Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from datetime import datetime
 from typing import Optional, Union, List
 import gradio as gr
 from huggingface_hub import HfApi, Repository
-from optimum_neuron_export import convert
+from optimum_neuron_export import convert, DIFFUSION_PIPELINE_MAPPING
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 from apscheduler.schedulers.background import BackgroundScheduler
 
@@ -30,22 +30,16 @@ TRANSFORMER_TASKS = {
     "sentence-similarity": {"color": "#06b6d4", "category": "Similarity"},
 }
 
-# Define diffusion pipeline types
+# Define diffusion pipeline types - updated structure
 DIFFUSION_PIPELINES = {
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "stable-diffusion-xl-inpaint": {"color": "#10b981", "category": "Stable Diffusion XL"},
-    "controlnet": {"color": "#f59e0b", "category": "ControlNet"},
-    "controlnet-xl": {"color": "#f59e0b", "category": "ControlNet XL"},
-    "pixart-alpha": {"color": "#ef4444", "category": "PixArt"},
-    "pixart-sigma": {"color": "#ef4444", "category": "PixArt"},
-    "flux": {"color": "#06b6d4", "category": "Flux"},
+    "stable-diffusion": {"color": "#ec4899", "category": "Stable Diffusion", "tasks": ["text-to-image", "image-to-image", "inpaint"]},
+    "stable-diffusion-xl": {"color": "#10b981", "category": "Stable Diffusion XL", "tasks": ["text-to-image", "image-to-image", "inpaint"]},
+    "sdxl-turbo": {"color": "#f59e0b", "category": "SDXL Turbo", "tasks": ["text-to-image", "image-to-image", "inpaint"]},
+    "lcm": {"color": "#8b5cf6", "category": "LCM", "tasks": ["text-to-image"]},
+    "pixart-alpha": {"color": "#ef4444", "category": "PixArt", "tasks": ["text-to-image"]},
+    "pixart-sigma": {"color": "#ef4444", "category": "PixArt", "tasks": ["text-to-image"]},
+    "flux": {"color": "#06b6d4", "category": "Flux", "tasks": ["text-to-image", "inpaint"]},
+    "flux-kont": {"color": "#06b6d4", "category": "Flux Kont", "tasks": ["text-to-image", "image-to-image"]},
 }
 
 TAGS = {
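For reference, the restructured `DIFFUSION_PIPELINES` entries can be queried for the tasks a given pipeline supports. A minimal, self-contained sketch (the `supported_tasks` helper and the one-entry dict are illustrative, not part of `app.py`):

```python
# Illustrative only: a one-entry copy of the dict shape added in this commit.
DIFFUSION_PIPELINES = {
    "stable-diffusion-xl": {
        "color": "#10b981",
        "category": "Stable Diffusion XL",
        "tasks": ["text-to-image", "image-to-image", "inpaint"],
    },
}

def supported_tasks(pipeline_name: str) -> list:
    # Unknown pipelines fall back to an empty task list.
    return DIFFUSION_PIPELINES.get(pipeline_name, {}).get("tasks", [])

print(supported_tasks("stable-diffusion-xl"))  # ['text-to-image', 'image-to-image', 'inpaint']
print(supported_tasks("controlnet"))           # []
```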
@@ -101,22 +95,46 @@ def format_tasks_for_table(tasks_str: str) -> str:
     tasks = [task.strip() for task in tasks_str.split(',')]
     return ' '.join([create_task_tag(task) for task in tasks])
 
-def 
-    """Update the task 
+def update_pipeline_and_task_dropdowns(model_type: str):
+    """Update the pipeline and task dropdowns based on selected model type"""
     if model_type == "transformers":
-        return 
-
-
-
-
+        return (
+            gr.Dropdown(visible=False),  # pipeline dropdown hidden
+            gr.Dropdown(
+                choices=ALL_TRANSFORMER_TASKS,
+                value="auto",
+                label="Task (auto can infer task from model)",
+                visible=True
+            )
         )
     else:  # diffusers
+        # Show pipeline dropdown, hide task dropdown initially
+        return (
+            gr.Dropdown(
+                choices=ALL_DIFFUSION_PIPELINES,
+                value="stable-diffusion",
+                label="Pipeline Type",
+                visible=True
+            ),
+            gr.Dropdown(
+                choices=DIFFUSION_PIPELINES["stable-diffusion"]["tasks"],
+                value=DIFFUSION_PIPELINES["stable-diffusion"]["tasks"][0],
+                label="Task",
+                visible=True
+            )
+        )
+
+def update_task_dropdown_for_pipeline(pipeline_name: str):
+    """Update task dropdown based on selected pipeline"""
+    if pipeline_name in DIFFUSION_PIPELINES:
+        tasks = DIFFUSION_PIPELINES[pipeline_name]["tasks"]
         return gr.Dropdown(
-            choices=
-            value=
-            label="
+            choices=tasks,
+            value=tasks[0] if tasks else None,
+            label="Task",
             visible=True
         )
+    return gr.Dropdown(visible=False)
 
 def toggle_custom_repo_box(pr_destinations: List[str]):
     """Show or hide the custom repo ID textbox based on checkbox selection."""
@@ -125,7 +143,7 @@ def toggle_custom_repo_box(pr_destinations: List[str]):
     else:
         return gr.Textbox(visible=False, value="")
 
-def neuron_export(model_id: str, model_type: str, task_or_pipeline: str,
+def neuron_export(model_id: str, model_type: str, pipeline_name: str, task_or_pipeline: str,
                   pr_destinations: List[str], custom_repo_id: str, custom_cache_repo: str, oauth_token: gr.OAuthToken):
 
     log_buffer = ""
@@ -169,8 +187,11 @@ def neuron_export(model_id: str, model_type: str, task_or_pipeline: str,
     }
 
     # The convert function is a generator, so we iterate through its messages
-    for status_code, message in convert(
-
+    for status_code, message in convert(
+        api, model_id, task_or_pipeline, model_type,
+        token=oauth_token.token, pr_options=pr_options,
+        pipeline_name=pipeline_name if model_type == "diffusers" else None
+    ):
         if isinstance(message, str):
             yield log(message)
         else:  # It's the final result dictionary
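The handler streams progress to the UI by consuming `convert` as a generator and yielding the accumulated log after each message; the final item is a result dictionary rather than a string. A minimal sketch of that pattern (the `fake_convert` generator and `handler` function are hypothetical stand-ins, not the real `optimum_neuron_export.convert`):

```python
# Hypothetical stand-in for a convert-like generator: yields (status_code, message)
# tuples and finishes with a result dict instead of a string.
def fake_convert():
    yield "0", "Exporting model to Neuron format..."
    yield "0", "Uploading compiled artifacts..."
    yield "0", {"status": "ok"}  # final result dictionary

def handler():
    log_buffer = ""
    for status_code, message in fake_convert():
        if isinstance(message, str):
            log_buffer += message + "\n"
            yield log_buffer                 # stream intermediate logs
        else:
            yield log_buffer + str(message)  # surface the final result

for update in handler():
    print(update)
```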
@@ -211,7 +232,7 @@ Simply provide a model ID from the Hugging Face Hub, and choose your desired out
 ### ✨ Key Features
 
 * **🚀 Create a New Optimized Repo**: Automatically converts your model and uploads it to a new repository under your username (e.g., `your-username/model-name-neuron`).
-* **🔗 Link Back to Original**: Creates a Pull Request on the original model
+* **🔗 Link Back to Original**: Creates a Pull Request on the original model's repository to add a link to your optimized version, making it easier for the community to discover.
 * **🛠️ PR to a Custom Repo**: For custom workflows, you can create a Pull Request to add the optimized files directly into an existing repository you own.
 * **📦 Contribute to Cache**: Contribute the generated compilation artifacts to a centralized cache repository (or your own private cache), helping avoid recompilation of already exported models.
 
@@ -324,6 +345,12 @@ with gr.Blocks(css=CUSTOM_CSS, theme=gr.themes.Soft()) as demo:
                 placeholder="Search for a model on the Hub...",
                 search_type="model",
             )
+            pipeline_dropdown = gr.Dropdown(
+                choices=ALL_DIFFUSION_PIPELINES,
+                value="stable-diffusion",
+                label="Pipeline Type",
+                visible=False
+            )
             task_dropdown = gr.Dropdown(
                 choices=ALL_TRANSFORMER_TASKS,
                 value="auto",
@@ -336,8 +363,14 @@ with gr.Blocks(css=CUSTOM_CSS, theme=gr.themes.Soft()) as demo:
 
     # Event Handlers
     model_type.change(
-        fn=
+        fn=update_pipeline_and_task_dropdowns,
         inputs=[model_type],
+        outputs=[pipeline_dropdown, task_dropdown]
+    )
+
+    pipeline_dropdown.change(
+        fn=update_task_dropdown_for_pipeline,
+        inputs=[pipeline_dropdown],
         outputs=[task_dropdown]
     )
 
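The two `change` handlers above form a cascading-dropdown pattern: changing the model type swaps both dropdowns, and changing the pipeline narrows the task list. A minimal, self-contained Gradio sketch of the same wiring (component and callback names here are illustrative; the app itself uses `update_pipeline_and_task_dropdowns` and `update_task_dropdown_for_pipeline` as shown in the diff):

```python
import gradio as gr

# Illustrative stand-in for DIFFUSION_PIPELINES (pipeline -> supported tasks).
PIPELINES = {"stable-diffusion": ["text-to-image", "inpaint"], "flux": ["text-to-image"]}

def on_model_type(model_type):
    if model_type == "transformers":
        # Hide the pipeline dropdown and reset the task dropdown.
        return gr.Dropdown(visible=False), gr.Dropdown(choices=["auto"], value="auto", visible=True)
    first = next(iter(PIPELINES))
    return (
        gr.Dropdown(choices=list(PIPELINES), value=first, visible=True),
        gr.Dropdown(choices=PIPELINES[first], value=PIPELINES[first][0], visible=True),
    )

def on_pipeline(pipeline_name):
    tasks = PIPELINES.get(pipeline_name, [])
    return gr.Dropdown(choices=tasks, value=tasks[0] if tasks else None, visible=True)

with gr.Blocks() as demo:
    model_type = gr.Radio(["transformers", "diffusers"], value="transformers", label="Model type")
    pipeline_dd = gr.Dropdown(choices=list(PIPELINES), visible=False, label="Pipeline Type")
    task_dd = gr.Dropdown(choices=["auto"], value="auto", label="Task")
    model_type.change(on_model_type, inputs=[model_type], outputs=[pipeline_dd, task_dd])
    pipeline_dd.change(on_pipeline, inputs=[pipeline_dd], outputs=[task_dd])

if __name__ == "__main__":
    demo.launch()
```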
@@ -351,7 +384,8 @@ with gr.Blocks(css=CUSTOM_CSS, theme=gr.themes.Soft()) as demo:
         fn=neuron_export,
         inputs=[
             input_model,
-            model_type,
+            model_type,
+            pipeline_dropdown,
             task_dropdown,
             pr_destinations_checkbox,
             custom_repo_id_textbox,