Update app.py
app.py CHANGED
@@ -32,8 +32,11 @@ pipe_schnell = DiffusionPipeline.from_pretrained(
 def run_dev_hyper(prompt):
     print("dev_hyper")
     pipe_dev.to("cuda")
+    print(hyper_lora)
     pipe_dev.load_lora_weights(hyper_lora)
+    print("Loaded hyper lora!")
     image = pipe_dev(prompt, num_inference_steps=8, joint_attention_kwargs={"scale": 0.125}).images[0]
+    print("Ran!")
     pipe_dev.unload_lora_weights()
     return image
 
@@ -41,8 +44,11 @@ def run_dev_hyper(prompt):
 def run_dev_turbo(prompt):
     print("dev_turbo")
     pipe_dev.to("cuda")
+    print(turbo_lora)
     pipe_dev.load_lora_weights(turbo_lora)
+    print("Loaded turbo lora!")
     image = pipe_dev(prompt, num_inference_steps=8).images[0]
+    print("Ran!")
     pipe_dev.unload_lora_weights()
     return image
 
@@ -50,7 +56,9 @@ def run_dev_turbo(prompt):
 def run_schnell(prompt):
     print("schnell")
     pipe_schnell.to("cuda")
+    print("schnell on gpu")
     image = pipe_schnell(prompt, num_inference_steps=4).images[0]
+    print("Ran!")
     return image
 
 def run_parallel_models(prompt):
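For context, a minimal sketch of the setup these helpers assume. The diff only shows the names `pipe_dev`, `pipe_schnell`, `hyper_lora`, and `turbo_lora` plus a truncated `DiffusionPipeline.from_pretrained(` call, so the model ids and LoRA checkpoints below are illustrative assumptions, not values taken from app.py.

```python
# Sketch only: model ids and LoRA sources are assumptions, not from this diff.
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import hf_hub_download

# Base pipelines; FLUX.1-dev / FLUX.1-schnell are guessed from the
# dev/schnell naming and the 8-step vs 4-step inference settings.
pipe_dev = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe_schnell = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)

# LoRA checkpoints; load_lora_weights() accepts a local file or a Hub repo id.
# These particular repos/files are assumptions consistent with the 8-step runs.
hyper_lora = hf_hub_download(
    "ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"
)
turbo_lora = "alimama-creative/FLUX.1-Turbo-Alpha"
```

With that in place, each helper moves its pipeline to the GPU, optionally swaps a LoRA in and out, and returns a PIL image, e.g. `run_dev_hyper("a cat holding a sign")`. The `joint_attention_kwargs={"scale": 0.125}` argument works because Flux pipelines read the LoRA scale from `joint_attention_kwargs["scale"]`; 0.125 applies the LoRA at 1/8 strength.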