LogicGoInfotechSpaces committed
Commit 3936786 · 1 Parent(s): f01916f

fix(space): expose FastAPI app from server.py to avoid diffusers mixin import

Files changed (1)
  app.py +1 -185
app.py CHANGED
@@ -1,185 +1 @@
- try:
-     import gradio as gr
- except Exception:
-     gr = None
- import torch
- from PIL import Image
- import numpy as np
- from PIL import Image
- from omegaconf import OmegaConf
- import os
- import cv2
- from diffusers import DDIMScheduler, UniPCMultistepScheduler
- from diffusers.models import UNet2DConditionModel
- from ref_encoder.latent_controlnet import ControlNetModel
- from ref_encoder.adapter import *
- from ref_encoder.reference_unet import ref_unet
- from utils.pipeline import StableHairPipeline
- from utils.pipeline_cn import StableDiffusionControlNetPipeline
- from huggingface_hub import hf_hub_download
-
-
- class StableHair:
-     def __init__(self, config="./configs/hair_transfer.yaml", device="cuda", weight_dtype=torch.float32) -> None:
-         print("Initializing Stable Hair Pipeline...")
-         self.config = OmegaConf.load(config)
-         self.device = device
-
-         # Hugging Face repo with weights
-         repo_id = "LogicGoInfotechSpaces/new_weights"
-
-         # Map config paths to Hugging Face repo structure
-         # Based on config: pretrained_folder: "./models/stage2"
-         # encoder_path: "pytorch_model.bin" -> stage2/pytorch_model.bin
-         # adapter_path: "pytorch_model_1.bin" -> stage2/pytorch_model_1.bin
-         # controlnet_path: "pytorch_model_2.bin" -> stage2/pytorch_model_2.bin
-         # bald_converter_path: "./models/stage1/pytorch_model.bin" -> stage1/pytorch_model.bin
-
-         # Download weights from Hugging Face
-         encoder_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model.bin")
-         adapter_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model_1.bin")
-         controlnet_hf_path = hf_hub_download(repo_id=repo_id, filename="stage2/pytorch_model_2.bin")
-         bald_converter_hf_path = hf_hub_download(repo_id=repo_id, filename="stage1/pytorch_model.bin")
-
-         ### Load vae controlnet
-         unet = UNet2DConditionModel.from_pretrained(self.config.pretrained_model_path, subfolder="unet").to(device)
-         controlnet = ControlNetModel.from_unet(unet).to(device)
-         _state_dict = torch.load(controlnet_hf_path, map_location="cpu")
-         controlnet.load_state_dict(_state_dict, strict=False)
-         controlnet.to(weight_dtype)
-
-         ### >>> create pipeline >>> ###
-         self.pipeline = StableHairPipeline.from_pretrained(
-             self.config.pretrained_model_path,
-             controlnet=controlnet,
-             safety_checker=None,
-             torch_dtype=weight_dtype,
-         ).to(device)
-         self.pipeline.scheduler = DDIMScheduler.from_config(self.pipeline.scheduler.config)
-
-         ### load Hair encoder/adapter
-         self.hair_encoder = ref_unet.from_pretrained(self.config.pretrained_model_path, subfolder="unet").to(device)
-         _state_dict = torch.load(encoder_hf_path, map_location="cpu")
-         self.hair_encoder.load_state_dict(_state_dict, strict=False)
-         self.hair_adapter = adapter_injection(self.pipeline.unet, device=self.device, dtype=torch.float16, use_resampler=False)
-         _state_dict = torch.load(adapter_hf_path, map_location="cpu")
-         self.hair_adapter.load_state_dict(_state_dict, strict=False)
-
-         ### load bald converter
-         bald_converter = ControlNetModel.from_unet(unet).to(device)
-         _state_dict = torch.load(bald_converter_hf_path, map_location="cpu")
-         bald_converter.load_state_dict(_state_dict, strict=False)
-         bald_converter.to(dtype=weight_dtype)
-         del unet
-
-         ### create pipeline for hair removal
-         self.remove_hair_pipeline = StableDiffusionControlNetPipeline.from_pretrained(
-             self.config.pretrained_model_path,
-             controlnet=bald_converter,
-             safety_checker=None,
-             torch_dtype=weight_dtype,
-         )
-         self.remove_hair_pipeline.scheduler = UniPCMultistepScheduler.from_config(self.remove_hair_pipeline.scheduler.config)
-         self.remove_hair_pipeline = self.remove_hair_pipeline.to(device)
-
-         ### move to fp16
-         self.hair_encoder.to(weight_dtype)
-         self.hair_adapter.to(weight_dtype)
-
-         print("Initialization Done!")
-
-     def Hair_Transfer(self, source_image, reference_image, random_seed, step, guidance_scale, scale, controlnet_conditioning_scale):
-         prompt = ""
-         n_prompt = ""
-         random_seed = int(random_seed)
-         step = int(step)
-         guidance_scale = float(guidance_scale)
-         scale = float(scale)
-         controlnet_conditioning_scale = float(controlnet_conditioning_scale)
-
-         # load imgs
-         H, W, C = source_image.shape
-
-         # generate images
-         set_scale(self.pipeline.unet, scale)
-         generator = torch.Generator(device="cuda")
-         generator.manual_seed(random_seed)
-         sample = self.pipeline(
-             prompt,
-             negative_prompt=n_prompt,
-             num_inference_steps=step,
-             guidance_scale=guidance_scale,
-             width=W,
-             height=H,
-             controlnet_condition=source_image,
-             controlnet_conditioning_scale=controlnet_conditioning_scale,
-             generator=generator,
-             reference_encoder=self.hair_encoder,
-             ref_image=reference_image,
-         ).samples
-         return sample, source_image, reference_image
-
-     def get_bald(self, id_image, scale):
-         H, W = id_image.size
-         scale = float(scale)
-         image = self.remove_hair_pipeline(
-             prompt="",
-             negative_prompt="",
-             num_inference_steps=30,
-             guidance_scale=1.5,
-             width=W,
-             height=H,
-             image=id_image,
-             controlnet_conditioning_scale=scale,
-             generator=None,
-         ).images[0]
-
-         return image
-
-
- model = StableHair(config="./configs/hair_transfer.yaml", weight_dtype=torch.float32)
-
- # Define your ML model or function here
- def model_call(id_image, ref_hair, converter_scale, scale, guidance_scale, controlnet_conditioning_scale):
-     # Your ML logic goes here
-     id_image = Image.fromarray(id_image.astype('uint8'), 'RGB')
-     ref_hair = Image.fromarray(ref_hair.astype('uint8'), 'RGB')
-     id_image = id_image.resize((512, 512))
-     ref_hair = ref_hair.resize((512, 512))
-     id_image_bald = model.get_bald(id_image, converter_scale)
-
-     id_image_bald = np.array(id_image_bald)
-     ref_hair = np.array(ref_hair)
-
-     image, source_image, reference_image = model.Hair_Transfer(
-         source_image=id_image_bald,
-         reference_image=ref_hair,
-         random_seed=-1,
-         step=30,
-         guidance_scale=guidance_scale,
-         scale=scale,
-         controlnet_conditioning_scale=controlnet_conditioning_scale,
-     )
-
-     image = Image.fromarray((image * 255.).astype(np.uint8))
-     return id_image_bald, image
-
- # Create a Gradio interface
- if gr is not None:
-     iface = gr.Interface(
-         fn=model_call,
-         inputs=[
-             gr.Image(label="ID Image"),
-             gr.Image(label="Reference Hair"),
-             gr.Slider(minimum=0.5, maximum=1.5, value=1, label="Converter Scale"),
-             gr.Slider(minimum=0.0, maximum=3.0, value=1.0, label="Hair Encoder Scale"),
-             gr.Slider(minimum=1.1, maximum=3.0, value=1.5, label="CFG"),
-             gr.Slider(minimum=0.1, maximum=2.0, value=1, label="Latent IdentityNet Scale"),
-         ],
-         outputs=[
-             gr.Image(type="pil", label="Bald Result"),
-             gr.Image(type="pil", label="Transfer Result"),
-         ],
-         title="Hair Transfer Demo",
-         description="In general, aligned faces work well, but can also be used on non-aligned faces, and you need to resize to 512 * 512",
-     )
-     iface.queue().launch(server_name='0.0.0.0', server_port=7860, share=True)
+ from server import app # Hugging Face Spaces will import app:app
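
For context, the pattern this commit adopts keeps app.py import-light: Spaces resolves app:app, and app.py now only re-exports a FastAPI instance, so the diffusers import chain (and its failing mixin import) never runs when the module is loaded. Below is a minimal sketch of what the server.py side could look like. server.py itself is not shown in this commit, so the endpoint, the stable_hair module, and the lazy-loading helper are assumptions for illustration, not the Space's actual API:

    # server.py (hypothetical sketch, not part of this commit)
    from fastapi import FastAPI

    app = FastAPI()  # app.py re-exports this; Spaces serves app:app

    _model = None  # lazily constructed pipeline instance


    def get_model():
        # Defer torch/diffusers imports so that importing this module
        # (and therefore app.py) stays cheap and cannot fail at startup.
        global _model
        if _model is None:
            import torch
            from stable_hair import StableHair  # hypothetical home for the removed class
            _model = StableHair(config="./configs/hair_transfer.yaml",
                                weight_dtype=torch.float32)
        return _model


    @app.get("/health")
    def health():
        # Liveness probe that never triggers model loading.
        return {"status": "ok"}

With this layout, any ASGI host can serve the Space with uvicorn app:app --host 0.0.0.0 --port 7860, and the heavy imports execute only on the first call to get_model().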