Update app.py
app.py CHANGED
@@ -23,8 +23,7 @@ torch.cuda.empty_cache()
 
 # Inference function
 @spaces.GPU(duration=25)
-def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2):
-
+def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
@@ -43,7 +42,6 @@ def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT,
     latency = f"Latency: {(time.time()-start_time):.2f} seconds"
     yield img, seed, latency
 
-
 # Example prompts
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
@@ -72,11 +70,13 @@ with gr.Blocks() as demo:
                     show_label=False,
                     container=False,
                 )
+                generateBtn = gr.Button("🖼️ Generate Image", visible=False)
                 enhanceBtn = gr.Button("🚀 Enhance Image")
 
                 with gr.Column("Advanced Options"):
                     with gr.Row():
-
+                        realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more GPU but create image in realtime.", value=True)
+                        latency = gr.Text(label="Latency")
                     with gr.Row():
                         seed = gr.Number(label="Seed", value=42, precision=0)
                         randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
@@ -96,26 +96,60 @@ with gr.Blocks() as demo:
         cache_examples="lazy"
     )
 
-    # Event handling - Trigger image generation on button click or input change
     enhanceBtn.click(
         fn=generate_image,
         inputs=[prompt, seed, width, height],
         outputs=[result, seed, latency],
         show_progress="hidden",
-
+        api_name=False,
         queue=False
     )
 
-
-        triggers=[prompt.submit, prompt.input, width.input, height.input, num_inference_steps.input],
+    generateBtn.click(
         fn=generate_image,
         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
         outputs=[result, seed, latency],
+        show_progress="full",
+        api_name=False,
+        queue=False
+    )
+
+    def update_ui(realtime_enabled):
+        return {
+            prompt: gr.update(interactive=True),
+            generateBtn: gr.update(visible=not realtime_enabled)
+        }
+
+    realtime.change(
+        fn=update_ui,
+        inputs=[realtime],
+        outputs=[prompt, generateBtn],
+        queue=False
+    )
+
+    def realtime_generation(*args):
+        if args[0]: # If realtime is enabled
+            return next(generate_image(*args[1:]))
+
+    prompt.submit(
+        fn=realtime_generation,
+        inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
+        outputs=[result, seed, latency],
         show_progress="hidden",
-
-        trigger_mode="always_last",
+        api_name=False,
         queue=False
     )
 
+    for component in [prompt, width, height, num_inference_steps]:
+        component.input(
+            fn=realtime_generation,
+            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
+            outputs=[result, seed, latency],
+            show_progress="hidden",
+            api_name=False,
+            trigger_mode="always_last",
+            queue=False
+        )
+
 # Launch the app
-demo.launch()
+demo.launch()
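Taken together, the commit threads gr.Progress(track_tqdm=True) into generate_image, adds a hidden "🖼️ Generate Image" button plus a "Realtime Toggler" checkbox and a latency readout to the UI, and swaps the old trigger-list wiring for a small dispatcher (realtime_generation) that re-renders on every prompt or size edit while realtime is on and falls back to the manual button when it is off. Below is a minimal, self-contained sketch of that wiring pattern; fake_generate and the component names here are illustrative stand-ins (assumed Gradio 4.x API), not the Space's actual pipeline code.

# Minimal sketch of the realtime-toggle wiring; fake_generate stands in for the FLUX generate_image generator.
import time
import gradio as gr

def fake_generate(prompt):
    # Generator, like the Space's generate_image: yields (result, latency).
    start = time.time()
    yield f"rendered: {prompt!r}", f"Latency: {(time.time() - start):.2f} seconds"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    realtime = gr.Checkbox(label="Realtime", value=True)
    generateBtn = gr.Button("Generate", visible=False)
    result = gr.Textbox(label="Result")
    latency = gr.Textbox(label="Latency")

    # Show the manual Generate button only while realtime is off.
    realtime.change(
        fn=lambda on: gr.update(visible=not on),
        inputs=[realtime],
        outputs=[generateBtn],
        queue=False,
    )

    # Dispatcher: regenerate on keystrokes only while realtime is on.
    def realtime_generation(on, text):
        if on:
            return next(fake_generate(text))
        return gr.update(), gr.update()  # leave outputs untouched when realtime is off

    prompt.input(
        fn=realtime_generation,
        inputs=[realtime, prompt],
        outputs=[result, latency],
        show_progress="hidden",
        trigger_mode="always_last",  # coalesce rapid edits; render only the latest value
        queue=False,
    )

    # Manual path when realtime is off.
    generateBtn.click(
        fn=lambda text: next(fake_generate(text)),
        inputs=[prompt],
        outputs=[result, latency],
    )

demo.launch()

trigger_mode="always_last" is what keeps typing usable here: instead of queueing one render per keystroke, Gradio runs the handler once more with the most recent pending input after the current run finishes.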