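# Gradio demo for DreamOmni2 on a Hugging Face ZeroGPU Space ("Running on
# Zero"): a Qwen2.5-VL model rewrites the user's instruction and two reference
# images into a detailed prompt, and a FLUX.1-Kontext pipeline carrying the
# DreamOmni2 generation LoRA renders the final image. GPU time is requested
# per call through the @spaces.GPU decorators.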
import os
import uuid

import gradio as gr
import spaces
import torch
from huggingface_hub import snapshot_download

from dreamomni2.pipeline_dreamomni2 import DreamOmni2Pipeline
from diffusers.utils import load_image
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from utils.vprocess import process_vision_info, resizeinput
def extract_gen_content(text):
    # Strip the fixed-length wrapper tags the VLM places around the generated
    # prompt (a 6-character opening tag and a 7-character closing tag).
    return text[6:-7]
def _load_model_processor():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Fetch only the VLM and generation-LoRA folders from the DreamOmni2 repo.
    local_dir = snapshot_download(
        repo_id="xiabs/DreamOmni2",
        revision="main",
        allow_patterns=["vlm-model/**", "gen_lora/**"],
    )
    vlm_dir = os.path.join(local_dir, "vlm-model")
    lora_dir = os.path.join(local_dir, "gen_lora")
    print(f"Loading models from vlm_path: {vlm_dir}, gen_lora_path: {lora_dir}")

    # FLUX.1-Kontext is the base image pipeline; the DreamOmni2 LoRA adapts it
    # for instruction-driven multimodal generation and editing.
    pipe = DreamOmni2Pipeline.from_pretrained(
        "black-forest-labs/FLUX.1-Kontext-dev",
        torch_dtype=torch.bfloat16,
    ).to(device)
    pipe.load_lora_weights(lora_dir, adapter_name="generation")
    pipe.set_adapters(["generation"], adapter_weights=[1])

    # The Qwen2.5-VL model turns the user instruction into a detailed prompt.
    vlm_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        vlm_dir,
        torch_dtype="bfloat16",
    ).to(device)
    processor = AutoProcessor.from_pretrained(vlm_dir)
    return vlm_model, processor, pipe
def _launch_demo(vlm_model, processor, pipe):
    @spaces.GPU(duration=90)
    def infer_vlm(input_img_path, input_instruction, prefix):
        if not vlm_model or not processor:
            raise gr.Error("VLM model not loaded. Cannot process prompt.")
        # Build a single user turn: all reference images followed by the text
        # instruction plus a task-type suffix.
        content = [{"type": "image", "image": path} for path in input_img_path]
        content.append({"type": "text", "text": input_instruction + prefix})
        messages = [{"role": "user", "content": content}]
        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = processor(text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt")
        inputs = inputs.to(device=vlm_model.device)
        generated_ids = vlm_model.generate(**inputs, do_sample=False, max_new_tokens=4096)
        # Drop the echoed input tokens so only the newly generated text is decoded.
        generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
        output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        return output_text[0]
    # Preferred output resolutions for FLUX.1-Kontext, one per aspect ratio.
    PREFERRED_KONTEXT_RESOLUTIONS = [
        (672, 1568),
        (688, 1504),
        (720, 1456),
        (752, 1392),
        (800, 1328),
        (832, 1248),
        (880, 1184),
        (944, 1104),
        (1024, 1024),
        (1104, 944),
        (1184, 880),
        (1248, 832),
        (1328, 800),
        (1392, 752),
        (1456, 720),
        (1504, 688),
        (1568, 672),
    ]

    def find_closest_resolution(width, height, preferred_resolutions):
        # Return the preferred (width, height) whose aspect ratio is closest
        # to that of the input dimensions.
        input_ratio = width / height
        return min(
            preferred_resolutions,
            key=lambda res: abs((res[0] / res[1]) - input_ratio),
        )
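    # Note: find_closest_resolution is not wired into perform_generation below,
    # which defaults to a 1024x1024 canvas. A minimal sketch of how it could be
    # used to match the output to the first reference image's aspect ratio
    # (an assumption, not part of the original flow):
    #
    #     w, h = load_image(input_img_paths[0]).size
    #     width, height = find_closest_resolution(w, h, PREFERRED_KONTEXT_RESOLUTIONS)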
    @spaces.GPU(duration=90)
    def perform_generation(input_img_paths, input_instruction, output_path, height=1024, width=1024):
        prefix = " It is generation task."
        # Resize the reference images before feeding them to the pipeline.
        source_imgs = [resizeinput(load_image(path)) for path in input_img_paths]
        # Expand the user instruction into a detailed prompt via the VLM.
        prompt = infer_vlm(input_img_paths, input_instruction, prefix)
        prompt = extract_gen_content(prompt)
        print(f"Prompt generated by the VLM: {prompt}")
        image = pipe(
            images=source_imgs,
            height=height,
            width=width,
            prompt=prompt,
            num_inference_steps=30,
            guidance_scale=3.5,
        ).images[0]
        image.save(output_path)
        print(f"Generation result saved to {output_path}")
    @spaces.GPU(duration=90)
    def process_request(image_file_1, image_file_2, instruction):
        if not image_file_1 or not image_file_2:
            raise gr.Error("Please upload both images.")
        if not instruction:
            raise gr.Error("Please provide an instruction.")
        if not pipe or not vlm_model:
            raise gr.Error("Models not loaded. Check the console for errors.")
        output_path = f"/tmp/{uuid.uuid4()}.png"
        # File paths from the two gr.Image inputs (type="filepath").
        input_img_paths = [image_file_1, image_file_2]
        perform_generation(input_img_paths, instruction, output_path)
        return output_path
css = """
.text-center { text-align: center; }
.result-img img {
max-height: 60vh !important;
min-height: 30vh !important;
width: auto !important;
object-fit: contain;
}
.input-img img {
max-height: 30vh !important;
width: auto !important;
object-fit: contain;
}
"""
    with gr.Blocks(theme=gr.themes.Soft(), title="DreamOmni2", css=css) as demo:
        gr.HTML(
            """
            <h1 style="text-align:center; font-size:40px; font-weight:bold; margin-bottom:16px;">
                DreamOmni2: Multimodal Image Generation and Editing
            </h1>
            """
        )
        gr.Markdown(
            "Upload two images, provide an instruction, and click 'Run'.",
            elem_classes="text-center",
        )
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown("⬆️ Upload images. Click or drag to upload.")
                with gr.Row():
                    image_uploader_1 = gr.Image(
                        label="Img 1",
                        type="filepath",
                        interactive=True,
                        elem_classes="input-img",
                    )
                    image_uploader_2 = gr.Image(
                        label="Img 2",
                        type="filepath",
                        interactive=True,
                        elem_classes="input-img",
                    )
                instruction_text = gr.Textbox(
                    label="Instruction",
                    lines=2,
                    placeholder="Enter your instruction for generation or editing here...",
                )
                run_button = gr.Button("Run", variant="primary")
            with gr.Column(scale=2):
                gr.Markdown(
                    "🖼️ **Generation Mode**: Create new scenes from reference images.\n\n"
                    "Tip: If the result is not what you expect, try clicking **Run** again."
                )
                output_image = gr.Image(
                    label="Result",
                    type="filepath",
                    elem_classes="result-img",
                )

        # --- Examples ---
        gr.Markdown("## Examples")
        gr.Examples(
            label="Generation Examples",
            examples=[
                [
                    "example_input/gen_tests/img1.jpg",
                    "example_input/gen_tests/img2.jpg",
                    "In the scene, the character from the first image stands on the left, and the character from the second image stands on the right. They are shaking hands against the backdrop of a spaceship interior.",
                    "example_input/gen_tests/gen_res.png",
                ]
            ],
            inputs=[image_uploader_1, image_uploader_2, instruction_text, output_image],
            cache_examples=False,
        )

        run_button.click(
            fn=process_request,
            inputs=[image_uploader_1, image_uploader_2, instruction_text],
            outputs=output_image,
        )

    demo.launch()
if __name__ == "__main__":
    vlm_model, processor, pipe = _load_model_processor()
    _launch_demo(vlm_model, processor, pipe)
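# To run this demo locally (assumptions: the file is saved as app.py, the
# dreamomni2 and utils packages from the Space repository are importable, and
# a CUDA GPU with enough memory for FLUX.1-Kontext in bfloat16 is available):
#
#     python app.py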