Ming Li committed · commit acbc9f8 · 1 parent: 654fc61
change inference_mode to no_grad to avoid errors
Files changed:
- image_segmentor.py +1 -1
- model.py +5 -5
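Context for the change: torch.inference_mode() marks its outputs as inference tensors, which cannot later be saved for backward by any autograd-tracked computation, so reusing such outputs downstream can raise a RuntimeError; torch.no_grad() only skips gradient tracking and leaves its outputs as ordinary tensors. That difference is a plausible source of the errors the commit message mentions. A minimal sketch of the behavior (the Linear layer, shapes, and variable names below are illustrative, not taken from this repo):

import torch

layer = torch.nn.Linear(4, 4)  # stand-in for any model

# inference_mode: outputs become "inference tensors"
with torch.inference_mode():
    feats_inf = layer(torch.randn(1, 4))

# no_grad: gradient tracking is skipped, outputs stay ordinary tensors
with torch.no_grad():
    feats_ng = layer(torch.randn(1, 4))

x = torch.ones(4, requires_grad=True)
try:
    (feats_inf * x).sum().backward()   # reusing an inference tensor in autograd fails
except RuntimeError as err:
    print("inference_mode output rejected:", err)

(feats_ng * x).sum().backward()        # works: the no_grad output can be reused freely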
image_segmentor.py CHANGED

@@ -13,7 +13,7 @@ class ImageSegmentor:
         self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
         self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
 
-    @torch.inference_mode()
+    @torch.no_grad()
     def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
         detect_resolution = kwargs.pop("detect_resolution", 512)
         image_resolution = kwargs.pop("image_resolution", 512)
model.py CHANGED

@@ -126,7 +126,7 @@ class Model:
             image=control_image,
         ).images
 
-    @torch.inference_mode()
+    @torch.no_grad()
     @spaces.GPU(enable_queue=True)
     def process_canny(
         self,
@@ -171,7 +171,7 @@ class Model:
         ]
         return [control_image] * num_images + results + conditions_of_generated_imgs
 
-    @torch.inference_mode()
+    @torch.no_grad()
     @spaces.GPU(enable_queue=True)
     def process_softedge(
         self,
@@ -238,7 +238,7 @@ class Model:
         ]
         return [control_image] * num_images + results + conditions_of_generated_imgs
 
-    @torch.inference_mode()
+    @torch.no_grad()
     @spaces.GPU(enable_queue=True)
     def process_segmentation(
         self,
@@ -292,7 +292,7 @@ class Model:
         ]
         return [control_image] * num_images + results + conditions_of_generated_imgs
 
-    @torch.inference_mode()
+    @torch.no_grad()
     @spaces.GPU(enable_queue=True)
     def process_depth(
         self,
@@ -345,7 +345,7 @@ class Model:
         ]
         return [control_image] * num_images + results + conditions_of_generated_imgs
 
-    @torch.inference_mode()
+    @torch.no_grad()
     @spaces.GPU(enable_queue=True)
     def process_lineart(
         self,
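For reference, the decorator form used throughout both patched files behaves the same as wrapping the method body in the context manager; a small sketch under that assumption (TinyModel and process are hypothetical, not the repo's actual Model class):

import torch

class TinyModel:
    def __init__(self) -> None:
        self.net = torch.nn.Linear(4, 4)

    @torch.no_grad()  # same effect as `with torch.no_grad():` around the whole body
    def process(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)

out = TinyModel().process(torch.randn(1, 4))
print(out.requires_grad)  # False: no gradients were tracked inside process()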