Update app.py
app.py CHANGED
@@ -14,7 +14,7 @@ import random
 
 os.system("pip install -e ./controlnet_aux")
 
-from controlnet_aux import OpenposeDetector, CannyDetector
+from controlnet_aux import OpenposeDetector, CannyDetector, AnylineDetector
 from depth_anything_v2.dpt import DepthAnythingV2
 
 from huggingface_hub import hf_hub_download
@@ -51,16 +51,14 @@ from diffusers import FluxControlNetPipeline, FluxControlNetModel
 from diffusers.models import FluxMultiControlNetModel
 
 base_model = 'black-forest-labs/FLUX.1-dev'
-controlnet_model = 'Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro'
+controlnet_model = 'Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0'
 controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.bfloat16)
 controlnet = FluxMultiControlNetModel([controlnet])
 pipe = FluxControlNetPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16)
 pipe.to("cuda")
 
-mode_mapping = {"canny":0, "tile":1, "depth":2, "blur":3, "openpose":4, "gray":5, "low quality": 6}
-strength_mapping = {"canny":0.65, "tile":0.45, "depth":0.55, "blur":0.45, "openpose":0.55, "gray":0.45, "low quality": 0.4}
-
 canny = CannyDetector()
+anyline = AnylineDetector.from_pretrained("TheMistoAI/MistoLine/Anyline", filename="MTEED.pth")
 open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
 
 torch.backends.cuda.matmul.allow_tf32 = True
@@ -90,6 +88,10 @@ def extract_canny(image):
     processed_image_canny = canny(image)
     return processed_image_canny
 
+def extract_soft_edge(image):
+    processed_image_soft_edge = anyline(image, detect_resolution=1280)
+    return processed_image_soft_edge
+
 def apply_gaussian_blur(image, kernel_size=(21, 21)):
     image = convert_from_image_to_cv2(image)
     blurred_image = convert_from_cv2_to_image(cv2.GaussianBlur(image, kernel_size, 0))
@@ -144,27 +146,21 @@ def resize_img(input_image, max_side=768, min_side=512, size=None,
     return input_image
 
 @spaces.GPU(duration=180)
-def infer(cond_in, image_in, prompt, inference_steps, guidance_scale, control_mode, control_strength, seed, progress=gr.Progress(track_tqdm=True)):
-
-    control_mode_num = mode_mapping[control_mode]
-
+def infer(cond_in, image_in, prompt, inference_steps, guidance_scale, control_mode, control_strength, control_guidance_end, seed, progress=gr.Progress(track_tqdm=True)):
+
     if cond_in is None:
         if image_in is not None:
             image_in = resize_img(load_image(image_in))
             if control_mode == "canny":
                 control_image = extract_canny(image_in)
+            elif control_mode == "soft edge":
+                control_image = extract_soft_edge(image_in)
             elif control_mode == "depth":
                 control_image = extract_depth(image_in)
             elif control_mode == "openpose":
                 control_image = extract_openpose(image_in)
-            elif control_mode == "blur":
-                control_image = apply_gaussian_blur(image_in)
-            elif control_mode == "low quality":
-                control_image = add_gaussian_noise(image_in)
             elif control_mode == "gray":
                 control_image = convert_to_grayscale(image_in)
-            elif control_mode == "tile":
-                control_image = tile(image_in)
     else:
         control_image = resize_img(load_image(cond_in))
 
@@ -173,7 +169,7 @@ def infer(cond_in, image_in, prompt, inference_steps, guidance_scale, control_mo
     image = pipe(
         prompt,
         control_image=[control_image],
-        control_mode=[control_mode_num],
+        control_guidance_end=[control_guidance_end],
         width=width,
         height=height,
         controlnet_conditioning_scale=[control_strength],
@@ -196,9 +192,9 @@ css="""
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("""
-        # FLUX.1-dev-ControlNet-Union-Pro
-        A unified ControlNet for FLUX.1-dev model from
-        The recommended strength: {"canny":0.65, "tile":0.45, "depth":0.55, "blur":0.45, "openpose":0.55, "gray":0.45, "low quality": 0.4}
+        # FLUX.1-dev-ControlNet-Union-Pro-2.0
+        A unified ControlNet for FLUX.1-dev model from Shakker Labs. Model card: [Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0](https://huggingface.co/Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0). <br />
+        The recommended strength: {"canny":0.70, "soft edge":0.70, "depth":0.80, "openpose":0.90, "gray":0.90}, control guidance end: {"canny":0.80, "soft edge":0.80, "depth":0.80, "openpose":0.65, "gray":0.80}
         """)
 
     with gr.Column():
@@ -214,7 +210,7 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Accordion("Controlnet"):
             control_mode = gr.Radio(
-                ["canny", "tile", "depth", "blur", "openpose", "gray", "low quality"], label="Mode", value="canny",
+                ["canny", "soft edge", "depth", "openpose", "gray"], label="Mode", value="canny",
                 info="select the control mode, one for all"
             )
 
@@ -223,7 +219,15 @@ with gr.Blocks(css=css) as demo:
                 minimum=0,
                 maximum=1.0,
                 step=0.05,
-                value=0.
+                value=0.70,
+            )
+
+            control_guidance_end = gr.Slider(
+                label="control guidance end",
+                minimum=0,
+                maximum=1.0,
+                step=0.05,
+                value=0.80,
             )
 
             seed = gr.Slider(
@@ -239,7 +243,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Column():
             with gr.Row():
                 inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=24)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=1.0, maximum=
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=1.0, maximum=7.0, step=0.1, value=3.5)
 
         submit_btn = gr.Button("Submit")
@@ -255,7 +259,7 @@ with gr.Blocks(css=css) as demo:
         api_name=False
     ).then(
         fn = infer,
-        inputs = [cond_in, image_in, prompt, inference_steps, guidance_scale, control_mode, control_strength, seed],
+        inputs = [cond_in, image_in, prompt, inference_steps, guidance_scale, control_mode, control_strength, control_guidance_end, seed],
         outputs = [result, processed_cond],
         show_api=False
     )
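For reference, a minimal sketch of how the updated model and settings compose outside this Space. The per-mode values mirror the recommendations in the Markdown above; the input path and prompt are placeholders, and note that unlike the 1.0 union model, no control_mode index is passed (this commit removes mode_mapping entirely):

import torch
from diffusers import FluxControlNetPipeline, FluxControlNetModel
from diffusers.models import FluxMultiControlNetModel
from diffusers.utils import load_image
from controlnet_aux import CannyDetector

# Recommended settings per mode, copied from the Space's description.
strength = {"canny": 0.70, "soft edge": 0.70, "depth": 0.80, "openpose": 0.90, "gray": 0.90}
guidance_end = {"canny": 0.80, "soft edge": 0.80, "depth": 0.80, "openpose": 0.65, "gray": 0.80}

controlnet = FluxControlNetModel.from_pretrained(
    "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro-2.0", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    controlnet=FluxMultiControlNetModel([controlnet]),
    torch_dtype=torch.bfloat16,
).to("cuda")

mode = "canny"
cond = CannyDetector()(load_image("input.png"))  # placeholder input image

image = pipe(
    "a photo of a cat",  # placeholder prompt
    control_image=[cond],
    controlnet_conditioning_scale=[strength[mode]],
    control_guidance_end=[guidance_end[mode]],
    width=cond.width,
    height=cond.height,
    num_inference_steps=24,
    guidance_scale=3.5,
).images[0]
image.save("output.png")

control_guidance_end=0.80 applies the ControlNet only over the first 80% of the denoising schedule, leaving the base model to refine details unconstrained in the final steps; that is presumably why openpose, the loosest structural hint here, gets the earliest cutoff (0.65).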