Update app.py
app.py CHANGED
@@ -272,17 +272,6 @@ def infer(ref_style_file, style_description, caption):
         # Reset the state after inference, regardless of success or failure
         reset_inference_state()
 
-def transform(tensor):
-    """
-    Define the necessary transformations for the image.
-    """
-    to_pil = T.ToPILImage()
-    return T.Compose([
-        T.Resize((256, 256)),  # Example resize, adjust as needed
-        T.ToTensor(),
-        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    ])(to_pil(tensor))
-
 def infer_compo(style_description, ref_style_file, caption, ref_sub_file):
     global models_rbm, models_b
     try:
@@ -324,7 +313,8 @@ def infer_compo(style_description, ref_style_file, caption, ref_sub_file):
 
         # Convert tensor to PIL Image before passing to sam_model.predict
        x0_preview_pil = T.ToPILImage()(x0_preview[0])
-
+        x0_preview_tensor = T.ToTensor()(x0_preview_pil)  # Convert PIL Image back to tensor
+        sam_mask, boxes, phrases, logits = sam_model.predict(x0_preview_pil, sam_prompt)
         sam_mask = sam_mask.detach().unsqueeze(dim=0).to(device)
 
         conditions = core.get_conditions(batch, models_rbm, extras, is_eval=True, is_unconditional=False, eval_image_embeds=True, eval_subject_style=True, eval_csd=False)
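
For context, the net effect of this commit on the subject-composition path is sketched below: the x0 preview tensor is converted to a PIL image and handed straight to sam_model.predict, instead of going through the now-removed transform() normalization helper. This is a minimal sketch, not the full app.py: the helper name segment_preview is hypothetical, and it assumes sam_model is a LangSAM-style predictor whose predict(image, text_prompt) returns masks, boxes, phrases, and logits, exactly as the call signature in the diff suggests, with x0_preview, sam_prompt, and device defined as in the surrounding function.

import torch
import torchvision.transforms as T

def segment_preview(x0_preview, sam_model, sam_prompt, device):
    # Hypothetical helper mirroring the updated flow in infer_compo.
    # Take the first preview image from the batch and convert it to a PIL image,
    # since the SAM wrapper is called with a PIL input rather than a normalized tensor.
    x0_preview_pil = T.ToPILImage()(x0_preview[0])

    # A plain tensor copy is kept around, as in the diff (not consumed by predict itself).
    x0_preview_tensor = T.ToTensor()(x0_preview_pil)

    # Assumed LangSAM-style API, matching the call added in the diff:
    # predict(image_pil, text_prompt) -> masks, boxes, phrases, logits
    sam_mask, boxes, phrases, logits = sam_model.predict(x0_preview_pil, sam_prompt)

    # Add a batch dimension and move the mask onto the working device.
    sam_mask = sam_mask.detach().unsqueeze(dim=0).to(device)
    return sam_mask, x0_preview_tensor

The removed transform() applied a resize plus ImageNet-style normalization before segmentation; passing the raw PIL image instead presumably leaves any preprocessing to the SAM wrapper's own pipeline.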