import os

import numpy as np
import torch
from omegaconf import OmegaConf
from PIL import Image

from diffusers import DDIMScheduler, UniPCMultistepScheduler
from diffusers.models import UNet2DConditionModel

from ref_encoder.latent_controlnet import ControlNetModel
from ref_encoder.adapter import *  # provides adapter_injection and set_scale
from ref_encoder.reference_unet import ref_unet
from utils.pipeline import StableHairPipeline
from utils.pipeline_cn import StableDiffusionControlNetPipeline

def _resolve_weight(prefix_path: str, filename: str) -> str:
    """Resolve a weight path, downloading from the Hugging Face Hub if needed.

    ``prefix_path`` can be either a local directory (e.g. ``./models/stage2``)
    or a Hub path like ``Org/Repo/subfolder``. When it looks like a Hub path,
    the file is downloaded via ``hf_hub_download`` with repo_id ``Org/Repo``
    and the remaining segments as the subfolder.
    """
    # Try a local file first.
    local_path = os.path.join(prefix_path, filename)
    if os.path.exists(local_path):
        return local_path

    # Not a local file: interpret prefix_path as a Hub path.
    parts = prefix_path.strip("/").split("/")
    if len(parts) < 2:
        raise FileNotFoundError(
            f"Weight not found locally and not a valid hub path: {prefix_path}/{filename}"
        )
    repo_id = "/".join(parts[:2])
    subfolder = "/".join(parts[2:]) if len(parts) > 2 else None
    try:
        from huggingface_hub import hf_hub_download

        return hf_hub_download(
            repo_id=repo_id,
            filename=filename,
            subfolder=subfolder,
            token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
        )
    except Exception as exc:
        raise RuntimeError(
            f"Failed to fetch {filename} from hub ({prefix_path}): {exc}"
        ) from exc
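
# A minimal sketch of how _resolve_weight behaves for the two path styles
# (the hub repo below is hypothetical, shown only for illustration):
#
#     _resolve_weight("./models/stage2", "pytorch_model.bin")
#     # -> "./models/stage2/pytorch_model.bin" when the file exists locally
#
#     _resolve_weight("SomeOrg/SomeRepo/stage2", "pytorch_model.bin")
#     # -> cached path returned by hf_hub_download(repo_id="SomeOrg/SomeRepo",
#     #    filename="pytorch_model.bin", subfolder="stage2")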

def concatenate_images(image_files, output_file, type="pil"):
    """Concatenate a list of images horizontally and save the result."""
    if type == "np":
        image_files = [Image.fromarray(img) for img in image_files]
    images = image_files  # list of PIL images
    # Resize every image to the tallest height so they can sit side by side.
    max_height = max(img.height for img in images)
    images = [img.resize((img.width, max_height)) for img in images]
    total_width = sum(img.width for img in images)
    combined = Image.new("RGB", (total_width, max_height))
    x_offset = 0
    for img in images:
        combined.paste(img, (x_offset, 0))
        x_offset += img.width
    combined.save(output_file)
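
# A quick sketch of both input modes (file paths are illustrative):
#
#     a = Image.open("a.png"); b = Image.open("b.png")
#     concatenate_images([a, b], "side_by_side.png")             # type="pil"
#     concatenate_images([np.array(a), np.array(b)],
#                        "side_by_side.png", type="np")          # numpy arrays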

class StableHair:
    def __init__(self, config="./configs/hair_transfer.yaml", device="cuda", weight_dtype=torch.float32) -> None:
        print("Initializing Stable Hair Pipeline...")
        self.config = OmegaConf.load(config)
        self.device = device

        # Download the trained weights from the Hugging Face Hub.
        repo_id = "LogicGoInfotechSpaces/new_weights"
        from huggingface_hub import hf_hub_download
        encoder_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model.bin")
        adapter_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model_1.bin")
        controlnet_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model_2.bin")
        bald_converter_hf_path = hf_hub_download(repo_id=repo_id, filename="stage1/pytorch_model.bin")

        ### load base UNet and derive the ControlNet from it
        unet = UNet2DConditionModel.from_pretrained(self.config.pretrained_model_path, subfolder="unet").to(device)
        controlnet = ControlNetModel.from_unet(unet).to(device)
        _state_dict = torch.load(controlnet_hf_path, map_location="cpu")
        controlnet.load_state_dict(_state_dict, strict=False)
        controlnet.to(weight_dtype)

        ### >>> create pipeline >>> ###
        self.pipeline = StableHairPipeline.from_pretrained(
            self.config.pretrained_model_path,
            controlnet=controlnet,
            safety_checker=None,
            torch_dtype=weight_dtype,
        ).to(device)
        self.pipeline.scheduler = DDIMScheduler.from_config(self.pipeline.scheduler.config)

        ### load hair encoder/adapter
        self.hair_encoder = ref_unet.from_pretrained(self.config.pretrained_model_path, subfolder="unet").to(device)
        _state_dict = torch.load(encoder_hf_path, map_location="cpu")
        self.hair_encoder.load_state_dict(_state_dict, strict=False)
        # Keep the adapter in the same dtype as the rest of the pipeline.
        self.hair_adapter = adapter_injection(self.pipeline.unet, device=self.device, dtype=weight_dtype, use_resampler=False)
        _state_dict = torch.load(adapter_hf_path, map_location="cpu")
        self.hair_adapter.load_state_dict(_state_dict, strict=False)

        ### load bald converter
        bald_converter = ControlNetModel.from_unet(unet).to(device)
        _state_dict = torch.load(bald_converter_hf_path, map_location="cpu")
        bald_converter.load_state_dict(_state_dict, strict=False)
        bald_converter.to(dtype=weight_dtype)
        del unet

        ### create pipeline for hair removal
        self.remove_hair_pipeline = StableDiffusionControlNetPipeline.from_pretrained(
            self.config.pretrained_model_path,
            controlnet=bald_converter,
            safety_checker=None,
            torch_dtype=weight_dtype,
        )
        self.remove_hair_pipeline.scheduler = UniPCMultistepScheduler.from_config(self.remove_hair_pipeline.scheduler.config)
        self.remove_hair_pipeline = self.remove_hair_pipeline.to(device)

        ### cast encoder/adapter to the working dtype
        self.hair_encoder.to(weight_dtype)
        self.hair_adapter.to(weight_dtype)
        print("Initialization Done!")

    def Hair_Transfer(self, source_image, reference_image, random_seed, step, guidance_scale, scale, controlnet_conditioning_scale, size=512):
        prompt = ""
        n_prompt = ""
        random_seed = int(random_seed)
        step = int(step)
        guidance_scale = float(guidance_scale)
        scale = float(scale)

        # load images
        source_image = Image.open(source_image).convert("RGB").resize((size, size))
        id = np.array(source_image)
        reference_image = np.array(Image.open(reference_image).convert("RGB").resize((size, size)))
        # Hair-removal strength is fixed at 0.9; `scale` controls the reference adapter below.
        source_image_bald = np.array(self.get_bald(source_image, scale=0.9))
        H, W, C = source_image_bald.shape

        # generate images
        set_scale(self.pipeline.unet, scale)
        generator = torch.Generator(device=self.device)
        generator.manual_seed(random_seed)
        sample = self.pipeline(
            prompt,
            negative_prompt=n_prompt,
            num_inference_steps=step,
            guidance_scale=guidance_scale,
            width=W,
            height=H,
            controlnet_condition=source_image_bald,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            generator=generator,
            reference_encoder=self.hair_encoder,
            ref_image=reference_image,
        ).samples
        return id, sample, source_image_bald, reference_image

    def get_bald(self, id_image, scale):
        # PIL's Image.size is (width, height)
        W, H = id_image.size
        scale = float(scale)
        image = self.remove_hair_pipeline(
            prompt="",
            negative_prompt="",
            num_inference_steps=30,
            guidance_scale=1.5,
            width=W,
            height=H,
            image=id_image,
            controlnet_conditioning_scale=scale,
            generator=None,
        ).images[0]
        return image

if __name__ == '__main__':
    model = StableHair(config="./configs/hair_transfer.yaml", weight_dtype=torch.float32)
    kwargs = OmegaConf.to_container(model.config.inference_kwargs)
    id, image, source_image_bald, reference_image = model.Hair_Transfer(**kwargs)
    os.makedirs(model.config.output_path, exist_ok=True)
    output_file = os.path.join(model.config.output_path, model.config.save_name)
    # Pipeline output is float in [0, 1]; scale to uint8 before saving.
    concatenate_images([id, source_image_bald, reference_image, (image * 255.).astype(np.uint8)], output_file=output_file, type="np")
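
# For reference, an equivalent call with explicit arguments instead of the
# config's inference_kwargs (file paths and values here are illustrative):
#
#     id, image, bald, ref = model.Hair_Transfer(
#         source_image="./test_imgs/source.png",
#         reference_image="./test_imgs/reference.png",
#         random_seed=42,
#         step=30,
#         guidance_scale=1.5,
#         scale=1.0,
#         controlnet_conditioning_scale=1.0,
#     )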