Update app.py
app.py CHANGED
@@ -43,11 +43,12 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the shared FLUX model
 base_model = "black-forest-labs/FLUX.1-dev"
-pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, low_cpu_mem_usage=True)
+pipe.to(device)
 
 # Settings for LoRA
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
-good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype)
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype, low_cpu_mem_usage=True)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype, low_cpu_mem_usage=True)
 
 # Image-to-Image pipeline setup
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
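Note: this hunk relies on names defined earlier in app.py that the diff does not show. A minimal sketch of that preamble, with the exact dtype an assumption (FLUX.1-dev is commonly run in bfloat16):

    import torch
    from diffusers import (
        DiffusionPipeline, AutoencoderTiny, AutoencoderKL,
        AutoPipelineForImage2Image, FluxControlNetModel, FluxControlNetPipeline,
    )

    dtype = torch.bfloat16  # assumed; the diff only shows torch_dtype=dtype
    device = "cuda" if torch.cuda.is_available() else "cpu"  # from the hunk header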
@@ -58,30 +59,26 @@ pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     tokenizer=pipe.tokenizer,
     text_encoder_2=pipe.text_encoder_2,
     tokenizer_2=pipe.tokenizer_2,
-    torch_dtype=dtype
-)
+    torch_dtype=dtype,
+    low_cpu_mem_usage=True
+)
 
 # ControlNet setup for upscaling
 controlnet = FluxControlNetModel.from_pretrained(
-    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
-)
+    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
+)
 
 # Upscale pipeline setup (reuses the existing pipe)
 pipe_upscale = FluxControlNetPipeline(
     vae=pipe.vae,
     text_encoder=pipe.text_encoder,
-    text_encoder_2=pipe.text_encoder_2,
+    text_encoder_2=pipe.text_encoder_2,
     tokenizer=pipe.tokenizer,
-    tokenizer_2=pipe.tokenizer_2,
+    tokenizer_2=pipe.tokenizer_2,
     transformer=pipe.transformer,
     scheduler=pipe.scheduler,
     controlnet=controlnet
-)
-
-
-
-
-
+)
 
 MAX_SEED = 2**32 - 1
 MAX_PIXEL_BUDGET = 1024 * 1024
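Note: pipe_upscale is assembled from the submodules of pipe, so both pipelines share one copy of the text encoders, VAE, and transformer weights rather than loading them twice. A quick sanity check, assuming the objects defined above:

    # The pipeline constructor stores references to the modules it is given,
    # so the shared submodules are the same objects in both pipelines.
    assert pipe_upscale.vae is pipe.vae
    assert pipe_upscale.transformer is pipe.transformer
    # Consequently, pipe_upscale.to(device) also moves the weights pipe uses.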
@@ -586,16 +583,14 @@ def infer_upscale(
         gr.Info("Upscaling image...")
         # Move all tensors to the same device
         pipe_upscale.to(device)
-        control_image = control_image.to(device)
+        control_image = torch.from_numpy(np.array(control_image)).permute(2, 0, 1).float().to(device)
 
         image = pipe_upscale(
             prompt="",
-
+            image=control_image,
             controlnet_conditioning_scale=controlnet_conditioning_scale,
             num_inference_steps=num_inference_steps,
             guidance_scale=3.5,
-            height=control_image.size[1],
-            width=control_image.size[0],
             generator=generator,
         ).images[0]
 
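Note on the new control_image line: np.array on a PIL image yields an HWC uint8 array, so the resulting tensor holds 0-255 values with no batch dimension, and the call needs import numpy as np in scope. Many diffusers Flux ControlNet examples instead pass the PIL image directly and let the pipeline's image processor handle normalization and batching; a hedged sketch of that alternative (the control_image keyword is an assumption about this diffusers version):

    # Sketch: pass the PIL image as-is and let the pipeline preprocess it.
    image = pipe_upscale(
        prompt="",
        control_image=control_image,  # PIL.Image, already resized upstream
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        num_inference_steps=num_inference_steps,
        guidance_scale=3.5,
        generator=generator,
    ).images[0]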
@@ -610,11 +605,11 @@ def infer_upscale(
         return image, seed
     except Exception as e:
         print(f"Error in infer_upscale: {str(e)}")
-        return
+        return gr.Error(f"Upscaling failed: {str(e)}"), seed
 
 def check_upscale_input(input_image, *args):
     if input_image is None:
-
+        return gr.Error("Please provide an input image for upscaling."), *args
     return input_image, *args
 
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
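Note on the two gr.Error values: Gradio treats a returned gr.Error as an ordinary output value; the documented pattern for surfacing the message in the UI is raising it. A minimal sketch of that variant for the input check (same message, raise instead of return):

    import gradio as gr

    def check_upscale_input(input_image, *args):
        # raise gr.Error aborts the event and shows the message to the user;
        # a returned gr.Error object would just be passed downstream.
        if input_image is None:
            raise gr.Error("Please provide an input image for upscaling.")
        return input_image, *args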