Update app.py
app.py CHANGED
@@ -38,6 +38,7 @@ def encode(init_image, torch_device, ae):
     init_image = torch.from_numpy(init_image).permute(2, 0, 1).float() / 127.5 - 1
     init_image = init_image.unsqueeze(0)
     init_image = init_image.to(torch_device)
+    ae = ae.cuda()
     with torch.no_grad():
         init_image = ae.encode(init_image.to()).to(torch.bfloat16)
     return init_image
@@ -125,7 +126,8 @@ class FluxEditor:
             os.mkdir(self.feature_path)
 
         with torch.no_grad():
-            inp = prepare(self.t5, self.clip, init_image, prompt=opts.source_prompt)
+            self.t5, self.clip = self.t5.cuda(), self.clip.cuda()
+            inp = prepare(self.t5.cuda(), self.clip, init_image, prompt=opts.source_prompt)
             inp_target = prepare(self.t5, self.clip, init_image, prompt=opts.target_prompt)
         timesteps = get_schedule(opts.num_steps, inp["img"].shape[1], shift=(self.name != "flux-schnell"))
 
@@ -137,14 +139,14 @@ class FluxEditor:
 
         # inversion initial noise
         with torch.no_grad():
-            z, info = denoise(self.model, **inp, timesteps=timesteps, guidance=1, inverse=True, info=info)
+            z, info = denoise(self.model.cuda(), **inp, timesteps=timesteps, guidance=1, inverse=True, info=info)
 
         inp_target["img"] = z
 
         timesteps = get_schedule(opts.num_steps, inp_target["img"].shape[1], shift=(self.name != "flux-schnell"))
 
         # denoise initial noise
-        x, _ = denoise(self.model, **inp_target, timesteps=timesteps, guidance=guidance, inverse=False, info=info)
+        x, _ = denoise(self.model.cuda(), **inp_target, timesteps=timesteps, guidance=guidance, inverse=False, info=info)
 
         # offload model, load autoencoder to gpu
         if self.offload:
@@ -166,6 +168,7 @@ class FluxEditor:
         else:
             idx = 0
 
+        ae = ae.cuda()
         with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
             x = self.ae.decode(x)
 
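Every hunk in this commit applies the same pattern: each module (the autoencoder, the T5 and CLIP text encoders, and the flow model) is moved onto the GPU with .cuda() immediately before it is used, instead of being placed there up front. Below is a minimal sketch of that on-demand pattern; the helper name and the toy encoder are hypothetical stand-ins, only the .cuda()-before-use idea comes from the diff above.

import torch
from torch import nn

def encode_on_gpu(module: nn.Module, x: torch.Tensor) -> torch.Tensor:
    # Move the module (and its input) to the GPU only when it is needed,
    # mirroring the `ae = ae.cuda()` / `self.model.cuda()` calls in the diff.
    if torch.cuda.is_available():
        module = module.cuda()
        x = x.cuda()
    with torch.no_grad():  # inference only, as in the edited code
        return module(x).to(torch.bfloat16)

# Hypothetical usage with a toy encoder standing in for the Flux autoencoder:
# toy_encoder = nn.Linear(16, 8)
# latents = encode_on_gpu(toy_encoder, torch.randn(1, 16))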