Update app.py
app.py CHANGED
@@ -3,7 +3,6 @@ import spaces
 from clip_slider_pipeline import CLIPSliderFlux
 from diffusers import FluxPipeline
 import torch
-import time
 import numpy as np
 import cv2
 from PIL import Image
@@ -39,8 +38,7 @@ def generate(slider_x, prompt, seed, iterations, steps, guidance_scale,
              controlnet_scale= None, ip_adapter_scale=None,
 
 ):
-
-    start_time = time.time()
+
     # check if avg diff for directions need to be re-calculated
     print("slider_x", slider_x)
     print("x_concept_1", x_concept_1, "x_concept_2", x_concept_2)
@@ -49,9 +47,6 @@ def generate(slider_x, prompt, seed, iterations, steps, guidance_scale,
         avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1], num_iterations=iterations).to(torch.float16)
         x_concept_1, x_concept_2 = slider_x[0], slider_x[1]
 
-        print(f"direction time: {end_time - start_time:.2f} ms")
-
-        start_time = time.time()
 
     if img2img_type=="controlnet canny" and img is not None:
         control_img = process_controlnet_img(img)
@@ -61,8 +56,6 @@ def generate(slider_x, prompt, seed, iterations, steps, guidance_scale,
     else: # text to image
         image = clip_slider.generate(prompt, guidance_scale=guidance_scale, scale=0, scale_2nd=0, seed=seed, num_inference_steps=steps, avg_diff=avg_diff)
 
-    end_time = time.time()
-    print(f"generation time: {end_time - start_time:.2f} ms")
 
     comma_concepts_x = ', '.join(slider_x)
 
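
Note on the removed instrumentation: the deleted lines timed the direction-finding and generation steps with time.time(), whose difference is in seconds even though the prints labeled the value "ms". If similar timing is ever reintroduced, a minimal self-contained sketch along these lines would report milliseconds correctly; the timed_stage helper is hypothetical and not part of this Space, and the sleep calls only stand in for the real pipeline calls:

import time
from contextlib import contextmanager

@contextmanager
def timed_stage(name):
    # Hypothetical helper (not in this Space): measures wall-clock time for a
    # block and prints it in milliseconds. time.perf_counter() returns seconds,
    # so the elapsed value is multiplied by 1000.
    start = time.perf_counter()
    try:
        yield
    finally:
        elapsed_ms = (time.perf_counter() - start) * 1000
        print(f"{name} time: {elapsed_ms:.2f} ms")

# Usage sketch mirroring the removed prints:
with timed_stage("direction"):
    time.sleep(0.05)   # stand-in for clip_slider.find_latent_direction(...)

with timed_stage("generation"):
    time.sleep(0.10)   # stand-in for clip_slider.generate(...)

time.perf_counter() is preferred over time.time() for measuring durations because it is a monotonic clock intended for interval timing.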