import torch
from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
from PIL import Image
import random

from wfControl.src.flux.condition import Condition
from wfControl.src.flux.generate import generate, seed_everything

print("Loading model...")
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
)
pipe = pipe.to("cuda")

# Load the four OminiControlArt style LoRAs as named adapters.
pipe.unload_lora_weights()
pipe.load_lora_weights("Yuanshi/OminiControlArt", weight_name="v0/ghibli.safetensors", adapter_name="ghibli")
pipe.load_lora_weights("Yuanshi/OminiControlArt", weight_name="v0/irasutoya.safetensors", adapter_name="irasutoya")
pipe.load_lora_weights("Yuanshi/OminiControlArt", weight_name="v0/simpsons.safetensors", adapter_name="simpsons")
pipe.load_lora_weights("Yuanshi/OminiControlArt", weight_name="v0/snoopy.safetensors", adapter_name="snoopy")


def generate_image(image, style, prompt):
    def resize(img, factor=16):
        # Center-crop so both dimensions are exact multiples of `factor`.
        w, h = img.size
        new_w, new_h = w // factor * factor, h // factor * factor
        padding_w, padding_h = (w - new_w) // 2, (h - new_h) // 2
        img = img.crop((padding_w, padding_h, new_w + padding_w, new_h + padding_h))
        return img

    # Map the UI style label to its LoRA adapter; fall back to "ghibli".
    adapter_name = {
        "Studio Ghibli": "ghibli",
        "Irasutoya Illustration": "irasutoya",
        "The Simpsons": "simpsons",
        "Snoopy": "snoopy",
    }.get(style, "ghibli")
    pipe.set_adapters(adapter_name)

    # Downscale so the longer side is 512 px, then crop to multiples of 16.
    factor = 512 / max(image.size)
    image = resize(
        image.resize(
            (int(image.size[0] * factor), int(image.size[1] * factor)),
            Image.LANCZOS,
        )
    )

    # Offset the condition tokens horizontally by the image width in 16-px patches.
    delta = -image.size[0] // 16
    condition = Condition("subject", image, position_delta=(0, delta))

    # Use a fresh random seed for each generation.
    seed = random.randint(0, 2**32 - 1)
    seed_everything(seed)

    result_img = generate(
        pipe,
        prompt=prompt,
        conditions=[condition],
        num_inference_steps=20,
        width=640,
        height=640,
        image_guidance_scale=1.0,
        default_lora=True,
        max_sequence_length=32,
    ).images[0]

    return result_img
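

# Example usage -- a minimal sketch, not part of the original script.
# "input.jpg", the style label, the prompt, and "output.png" are placeholder
# assumptions; generate_image() accepts any RGB PIL image plus one of the four
# style labels defined above.
if __name__ == "__main__":
    source = Image.open("input.jpg").convert("RGB")
    styled = generate_image(
        source,
        style="Studio Ghibli",
        prompt="A cozy cottage in a forest clearing",
    )
    styled.save("output.png")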