Spaces:
Runtime error
import gradio as gr
import requests
import io
from PIL import Image
import json
import os

# Load LoRAs from JSON
with open('loras.json', 'r') as f:
    loras = json.load(f)
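# The lookups below rely on each entry exposing "image", "title", "repo" and
# "trigger_word". An assumed example entry (illustrative values, not the real
# contents of loras.json):
# {
#   "image": "thumbnails/example.png",
#   "title": "Example LoRA",
#   "repo": "some-user/example-lora",
#   "trigger_word": "in the style of EXMPL"
# }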
# API call function
def query(payload, api_url, token):
    headers = {"Authorization": f"Bearer {token}"}
    response = requests.post(api_url, headers=headers, json=payload)
    # Fail loudly on HTTP errors instead of handing an error payload to PIL
    response.raise_for_status()
    return io.BytesIO(response.content)
# Gradio UI
with gr.Blocks() as demo:  # Removed the css argument
    title = gr.HTML(
        """<h1><img src="https://i.imgur.com/vT48NAO.png" alt="LoRA"> LoRA the Explorer</h1>""",
        elem_id="title",
    )
    gallery = gr.Gallery(
        value=[(item["image"], item["title"]) for item in loras],
        label="LoRA Gallery",
        allow_preview=False,
        columns=3,
        elem_id="gallery",
        show_share_button=False,
    )
    prompt = gr.Textbox(label="Prompt", show_label=False, lines=1, max_lines=1, placeholder="Type a prompt after selecting a LoRA", elem_id="prompt")
    # Place the weight slider inside the accordion so "Advanced options" is not empty
    with gr.Accordion("Advanced options", open=False):
        weight = gr.Slider(0, 10, value=1, step=0.1, label="LoRA weight")
    result = gr.Image(interactive=False, label="Generated Image", elem_id="result-image")
    # Define the function to run when the button is clicked
    def run_lora(prompt, weight):
        # Defaults to the first LoRA in loras.json; a sketch for driving this from the
        # gallery click follows the script. Note that `weight` is not currently
        # forwarded to the Inference API payload.
        selected_lora = loras[0]
        api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
        trigger_word = selected_lora["trigger_word"]
        token = os.getenv("API_TOKEN")  # Reads the API token set in your managed environment
        payload = {"inputs": f"{prompt} {trigger_word}"}
        print("Calling query function...")
        image_bytes = query(payload, api_url, token)
        print("Query function executed successfully.")
        return Image.open(image_bytes)
| print("Starting Gradio UI...") | |
| gr.Interface( | |
| fn=run_lora, | |
| inputs=[prompt, weight], | |
| outputs=[result], | |
| ).launch() | |
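The script always generates with loras[0]. If the gallery click should choose the LoRA instead, a minimal sketch of the wiring is shown below. It assumes Gradio's gr.SelectData event payload, would live inside the `with gr.Blocks() as demo:` block, and would replace the hard-coded index and the run_button.click(...) line above; names such as selected_index and run_lora_selected are illustrative, not part of the original app.

# Sketch: track which gallery item was clicked and pass its index into the handler.
selected_index = gr.State(0)

def update_selection(evt: gr.SelectData):
    # evt.index is the position of the clicked gallery item
    return evt.index

gallery.select(update_selection, None, selected_index)

def run_lora_selected(prompt, weight, index):
    selected_lora = loras[index]  # use the clicked LoRA instead of loras[0]
    api_url = f"https://api-inference.huggingface.co/models/{selected_lora['repo']}"
    payload = {"inputs": f"{prompt} {selected_lora['trigger_word']}"}
    image_bytes = query(payload, api_url, os.getenv("API_TOKEN"))
    return Image.open(image_bytes)

run_button.click(run_lora_selected, inputs=[prompt, weight, selected_index], outputs=result)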