# Multi_Ref_Edit / app.py
import gradio as gr
import numpy as np
import random, torch
import spaces  # needed for ZeroGPU (see the @spaces.GPU decorator below)
from PIL import Image
from kontext.pipeline_flux_kontext import FluxKontextPipeline
from kontext.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
from diffusers import FluxTransformer2DModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
# ---------------------------
# utils
# ---------------------------
def resize_by_bucket(images_pil, resolution=512):
    """Resize all reference images to the bucket whose aspect ratio is closest to their mean aspect ratio."""
    assert len(images_pil) > 0, "images_pil must not be empty"
bucket_override = [
(336, 784), (344, 752), (360, 728), (376, 696),
(400, 664), (416, 624), (440, 592), (472, 552),
(512, 512),
(552, 472), (592, 440), (624, 416), (664, 400),
(696, 376), (728, 360), (752, 344), (784, 336),
]
    # Scale the buckets to the target resolution and snap each side down to a multiple of 16
bucket_override = [(int(h / 512 * resolution), int(w / 512 * resolution)) for h, w in bucket_override]
bucket_override = [(h // 16 * 16, w // 16 * 16) for h, w in bucket_override]
aspect_ratios = [img.height / img.width for img in images_pil]
mean_aspect_ratio = float(np.mean(aspect_ratios))
new_h, new_w = bucket_override[0]
min_aspect_diff = abs(new_h / new_w - mean_aspect_ratio)
for h, w in bucket_override:
aspect_diff = abs(h / w - mean_aspect_ratio)
if aspect_diff < min_aspect_diff:
min_aspect_diff = aspect_diff
new_h, new_w = h, w
resized_images = [img.resize((new_w, new_h), resample=Image.BICUBIC) for img in images_pil]
return resized_images
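# Usage sketch for resize_by_bucket (illustrative only; the file names are hypothetical):
# two references with aspect ratios 0.75 and 0.80 average to ~0.775, so at resolution=512
# the closest bucket is (440, 592) and both images come back resized to 592x440 (width x height).
#   refs = [Image.open("ref_a.png"), Image.open("ref_b.png")]
#   refs_resized = resize_by_bucket(refs, resolution=512)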
# ---------------------------
# pipeline init
# ---------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
flux_pipeline = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev")
flux_pipeline.scheduler = FlowMatchEulerDiscreteScheduler.from_config(flux_pipeline.scheduler.config)
flux_pipeline.scheduler.config.stochastic_sampling = False
# precision & device
flux_pipeline.vae.to(device).to(torch.bfloat16)
flux_pipeline.text_encoder.to(device).to(torch.bfloat16)
flux_pipeline.text_encoder_2.to(device).to(torch.bfloat16)
# Swap in the fine-tuned transformer weights
ckpt_path = hf_hub_download("NoobDoge/Multi_Ref_Model", "full_model.safetensors")
new_weight = load_file(ckpt_path)
flux_pipeline.transformer.load_state_dict(new_weight)
# flux_pipeline.transformer = FluxTransformer2DModel.from_single_file(ckpt_path, torch_dtype=torch.bfloat16)
flux_pipeline.transformer.to(device).to(torch.bfloat16)
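# Sketch (assumption, not part of the original setup): the checkpoint tensors could be cast
# to bfloat16 before load_state_dict to avoid keeping a full-precision copy in host memory:
#   new_weight = {k: v.to(torch.bfloat16) for k, v in new_weight.items()}
#   flux_pipeline.transformer.load_state_dict(new_weight)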
# ---------------------------
# constants
# ---------------------------
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1568  # upper bound for the Width/Height sliders below
# ---------------------------
# inference
# ---------------------------
@spaces.GPU  # allocate a GPU per call when running on ZeroGPU
def infer(
prompt,
    ref1,  # PIL.Image or None
    ref2,  # PIL.Image or None (optional)
seed,
randomize_seed,
width,
height,
max_area,
    guidance_scale,  # not currently passed to the pipeline; add it to the call below if needed
num_inference_steps,
progress=gr.Progress(track_tqdm=True),
):
    """Run multi-reference editing with FLUX.1 Kontext and return (edited image, seed used)."""
    # Collect the optional reference images
refs = [x for x in (ref1, ref2) if x is not None]
if len(refs) == 0:
        raise gr.Error("Please upload at least one reference image (ref1 or ref2).")
    # Pick a random seed if requested
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(int(seed))
    # Bucket-resize the reference images
raw_images = resize_by_bucket(refs, resolution=max_area)
    # Inference
with torch.no_grad():
out = flux_pipeline(
image=raw_images,
prompt=prompt,
height=height,
width=width,
num_inference_steps=int(num_inference_steps),
max_area=max_area ** 2,
generator=generator,
            # To use guidance_scale, confirm the pipeline supports the argument, then enable:
# guidance_scale=float(guidance_scale),
)
output_img = out.images[0]
return output_img, int(seed)
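# Programmatic call sketch (assumption; the demo normally invokes infer through the Gradio UI):
#   img, used_seed = infer(
#       prompt="Place the butterfly from the first image onto the landscape of the second image.",
#       ref1=Image.open("assets/BgYeqlzB_src1.png"),
#       ref2=Image.open("assets/BgYeqlzB_src2.png"),
#       seed=0, randomize_seed=True, width=512, height=512,
#       max_area=512, guidance_scale=2.5, num_inference_steps=28,
#   )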
# ---------------------------
# UI
# ---------------------------
example_triples = [
["assets/-1KG_J6e_src1.png", "assets/-1KG_J6e_src2.png",
"Transform the first image (pizza) into an oil painting style, using the warm and textured brushstrokes, color gradients, and artistic composition observed in the second image (stone house painting)."],
["assets/BgYeqlzB_src1.png", "assets/BgYeqlzB_src2.png",
"Place the butterfly from the first image onto the landscape of the second image, positioning it either flying above the river near the bridge or perched on one of the trees in the foreground. Adjust the butterfly's size and blending to ensure it fits naturally in the scene."],
["assets/H99pnBoC_src1.png", "assets/H99pnBoC_src2.png",
"Insert the person from the first image into the autumn park setting of the second image. Position them standing next to the person on the bench and have them interact by tipping their hat in greeting."],
["assets/pmkexBUx_src1.png", "assets/pmkexBUx_src2.png",
"Place the person wearing a wide-brimmed hat and beige scarf/shawl from the first image onto the mountain ridge in the second image. Position them as if standing on the ridge, facing the valley view, to create an immersive outdoors scene."],
["assets/Uwn0WEbC_src1.png", "assets/Uwn0WEbC_src2.png",
"Change the clothing of the person in the first image to match the attire shown in the second image, ensuring the details of the dress, including the blue bodice, white blouse, and the overall rustic aesthetic, are faithfully replicated."],
["assets/Uwn0WEbC_src2.png", "assets/Uwn0WEbC_src1.png",
"Change the clothing of the person in the first image to match the attire shown in the second image, ensuring the details of the clothes, and the overall rustic aesthetic, are faithfully replicated."]
]
css = """ #col-container { margin: 0 auto; max-width: 800px; } """
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown("# Multi Ref Edit Demo")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0, variant="primary")
        # Two input images (ref2 may be left empty)
with gr.Row():
ref1_comp = gr.Image(label="Input Image 1", type="pil")
ref2_comp = gr.Image(label="Input Image 2 (optional)", type="pil")
result = gr.Image(label="Result", show_label=False)
with gr.Accordion("Advanced Settings", open=False):
seed_comp = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed_comp = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width_comp = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=512,
)
height_comp = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=512,
)
max_area_comp = gr.Slider(
label="Max Area",
minimum=512,
maximum=1024,
step=512,
value=512,
)
with gr.Row():
guidance_scale_comp = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=2.5,
)
num_inference_steps_comp = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=28,
)
        # Note: the Examples inputs are ordered [ref1, ref2, prompt]; the example entries follow the same order
gr.Examples(
examples=example_triples,
inputs=[ref1_comp, ref2_comp, prompt],
label="Examples (2 refs + prompt)",
            # run_on_click=True,  # uncomment to run inference automatically when an example is clicked
)
    # Note: pass ref1 and ref2 as separate components in inputs, not as a nested list [ref1, ref2]
gr.on(
triggers=[run_button.click, prompt.submit],
fn=infer,
inputs=[
prompt,
ref1_comp,
ref2_comp, # ref2 可为空
seed_comp,
randomize_seed_comp,
width_comp,
height_comp,
max_area_comp,
guidance_scale_comp,
num_inference_steps_comp,
],
outputs=[result, seed_comp],
)
if __name__ == "__main__":
demo.launch()