Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -155,7 +155,7 @@ from einops import rearrange
|
|
| 155 |
from shap_e.diffusion.sample import sample_latents
|
| 156 |
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
|
| 157 |
from shap_e.models.download import load_model, load_config
|
| 158 |
-
from shap_e.util.notebooks import create_pan_cameras
|
| 159 |
from shap_e.models.nn.camera import DifferentiableCameraBatch, DifferentiableProjectiveCamera
|
| 160 |
import math
|
| 161 |
import time
|
|
@@ -171,6 +171,29 @@ from src.utils.camera_util import (
|
|
| 171 |
from src.utils.mesh_util import save_obj, save_glb
|
| 172 |
from src.utils.infer_util import remove_background, resize_foreground
|
| 173 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
def create_custom_cameras(size: int, device: torch.device, azimuths: list, elevations: list,
|
| 175 |
fov_degrees: float, distance: float) -> DifferentiableCameraBatch:
|
| 176 |
# Object is in a 2x2x2 bounding box (-1 to 1 in each dimension)
|
|
@@ -654,7 +677,7 @@ def create_demo():
|
|
| 654 |
)
|
| 655 |
|
| 656 |
# Set up event handlers
|
| 657 |
-
@spaces.GPU(duration=
|
| 658 |
def generate(prompt, guidance_scale, num_steps):
|
| 659 |
try:
|
| 660 |
# Ensure PyTorch3D works with CUDA
|
|
|
|
| 155 |
from shap_e.diffusion.sample import sample_latents
|
| 156 |
from shap_e.diffusion.gaussian_diffusion import diffusion_from_config
|
| 157 |
from shap_e.models.download import load_model, load_config
|
| 158 |
+
from shap_e.util.notebooks import create_pan_cameras
|
| 159 |
from shap_e.models.nn.camera import DifferentiableCameraBatch, DifferentiableProjectiveCamera
|
| 160 |
import math
|
| 161 |
import time
|
|
|
|
| 171 |
from src.utils.mesh_util import save_obj, save_glb
|
| 172 |
from src.utils.infer_util import remove_background, resize_foreground
|
| 173 |
|
| 174 |
def decode_latent_images(
    xm: Union[Transmitter, VectorDecoder],
    latent: torch.Tensor,
    cameras: DifferentiableCameraBatch,
    rendering_mode: str = "stf",
    params=None,
    background_color: Union[torch.Tensor, None] = None,
):
    """Render a single Shap-E latent into images seen from the given cameras.

    Args:
        xm: A ``Transmitter`` or ``VectorDecoder`` model; its ``renderer`` is
            used to produce the views.
        latent: Latent code for one object (batched internally via ``latent[None]``).
        cameras: Camera batch describing the viewpoints to render.
        rendering_mode: Renderer mode passed through in the render options
            (default ``"stf"``).
        params: Pre-computed renderer params; when ``None`` they are derived
            from ``latent`` via ``bottleneck_to_params``. (Exact structure is
            renderer-defined — presumably a dict of tensors; confirm against
            ``xm.renderer.update``.)
        background_color: RGB background, defaults to white
            ``[255.0, 255.0, 255.0]``. NOTE: the default is created per call
            rather than in the signature — a tensor default would be built
            once at import time and shared across all calls (mutable default
            argument pitfall).

    Returns:
        Rendered images: background composited with the decoded channels,
        weighted by the renderer's transmittance.
    """
    if background_color is None:
        background_color = torch.tensor([255.0, 255.0, 255.0], dtype=torch.float32)

    # Derive renderer params from the latent unless the caller supplied them.
    if params is None:
        params = (xm.encoder if isinstance(xm, Transmitter) else xm).bottleneck_to_params(
            latent[None]
        )
    params = xm.renderer.update(params)

    decoded = xm.renderer.render_views(
        AttrDict(cameras=cameras),
        params=params,
        options=AttrDict(rendering_mode=rendering_mode, render_with_direction=False),
    )

    # Alpha-composite: fully transmissive pixels show the background,
    # opaque pixels show the decoded channels.
    bg_color = background_color.to(decoded.channels.device)
    images = bg_color * decoded.transmittance + (1 - decoded.transmittance) * decoded.channels
    return images
|
| 196 |
+
|
| 197 |
def create_custom_cameras(size: int, device: torch.device, azimuths: list, elevations: list,
|
| 198 |
fov_degrees: float, distance: float) -> DifferentiableCameraBatch:
|
| 199 |
# Object is in a 2x2x2 bounding box (-1 to 1 in each dimension)
|
|
|
|
| 677 |
)
|
| 678 |
|
| 679 |
# Set up event handlers
|
| 680 |
+
@spaces.GPU(duration=20) # Reduced duration to 20 seconds
|
| 681 |
def generate(prompt, guidance_scale, num_steps):
|
| 682 |
try:
|
| 683 |
# Ensure PyTorch3D works with CUDA
|