Commit 3c7a85f · Parent: b3e34a7 · "add"
app.py CHANGED
@@ -98,7 +98,6 @@ def load_models():
                 torch_dtype=torch.float16,
                 local_files_only=False,
                 resume_download=True,
-                token=True  # Use token-based auth
             )
             break
         except (ReadTimeout, ConnectionError) as e:
@@ -133,7 +132,6 @@ def load_models():
                 "YiftachEde/Sharp-It",
                 local_files_only=False,
                 resume_download=True,
-                token=True  # Use token-based auth
             ).to(torch.float16)
             break
         except (ReadTimeout, ConnectionError) as e:
@@ -157,7 +155,6 @@ def load_models():
                 repo_type="model",
                 local_files_only=False,
                 resume_download=True,
-                token=True,  # Use token-based auth
                 cache_dir="model_cache"  # Use a specific cache directory
             )
             break
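The three hunks above drop `token=True` from each download call and leave the surrounding retry loop intact. Below is a minimal sketch of that pattern, assuming a retry budget and backoff that app.py may implement differently; `MAX_RETRIES`, `load_with_retry`, and the `DiffusionPipeline` loader are illustrative stand-ins, not taken from the commit — only the keyword arguments and exception types come from the diff.

```python
import time

import torch
from diffusers import DiffusionPipeline
from requests.exceptions import ConnectionError, ReadTimeout

MAX_RETRIES = 3  # hypothetical retry budget, not from the commit

def load_with_retry():
    for attempt in range(MAX_RETRIES):
        try:
            pipe = DiffusionPipeline.from_pretrained(
                "YiftachEde/Sharp-It",   # repo id shown in the diff
                torch_dtype=torch.float16,
                local_files_only=False,  # allow fetching from the Hub
                resume_download=True,    # resume interrupted downloads
                # token=True removed: the public repo is fetched anonymously
            )
            return pipe
        except (ReadTimeout, ConnectionError):
            if attempt == MAX_RETRIES - 1:
                raise
            time.sleep(2 ** attempt)  # simple backoff before retrying
```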
@@ -293,7 +290,7 @@ class ShapERenderer:
         batch_size = 1
         guidance_scale = float(guidance_scale)
 
-        with torch.
+        with torch.amp.autocast('cuda'):  # Use automatic mixed precision
             latents = sample_latents(
                 batch_size=batch_size,
                 model=self.model,
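This and the remaining hunks swap the previous context manager (shown truncated as `with torch.`) for `torch.amp.autocast('cuda')`; the same one-line change is applied at each call site below. A self-contained sketch of what autocast does, with a toy module standing in for `self.model` and `sample_latents`:

```python
import torch
import torch.nn as nn

# Toy stand-ins for self.model / sample_latents from app.py.
model = nn.Linear(16, 16).to("cuda")
x = torch.randn(4, 16, device="cuda")

# Inside the context, matmul-heavy ops run in float16 automatically,
# while numerically sensitive ops are kept in float32.
with torch.amp.autocast("cuda"):
    y = model(x)

print(y.dtype)  # torch.float16: the linear layer ran in half precision
```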
@@ -320,7 +317,7 @@ class ShapERenderer:
 
         for i, (azimuth, elevation) in enumerate(zip(azimuths, elevations)):
             cameras = create_custom_cameras(size, self.device, azimuths=[azimuth], elevations=[elevation], fov_degrees=30, distance=3.0)
-            with torch.
+            with torch.amp.autocast('cuda'):  # Use automatic mixed precision
                 rendered_image = decode_latent_images(
                     self.xm,
                     latents[0],
@@ -381,7 +378,7 @@ class RefinerInterface:
         input_image = Image.fromarray(new_layout)
 
         # Process with the pipeline (expects 960x640)
-        with torch.
+        with torch.amp.autocast('cuda'):  # Use automatic mixed precision
             refined_output_960x640 = self.pipeline.refine(
                 input_image,
                 prompt=prompt,
@@ -392,7 +389,7 @@ class RefinerInterface:
         torch.cuda.empty_cache()  # Clear GPU memory after refinement
 
         # Generate mesh using the 960x640 format
-        with torch.
+        with torch.amp.autocast('cuda'):  # Use automatic mixed precision
             vertices, faces, vertex_colors = create_mesh(
                 refined_output_960x640,
                 self.model,
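Taken together, the `RefinerInterface` hunks run each heavy stage under autocast and call `torch.cuda.empty_cache()` between stages. A hedged sketch of that sequencing, where `refine_stage` and `mesh_stage` are hypothetical stand-ins for `self.pipeline.refine` and `create_mesh`:

```python
import torch

def run_stages(refine_stage, mesh_stage, image):
    # First heavy stage under mixed precision.
    with torch.amp.autocast("cuda"):
        refined = refine_stage(image)
    torch.cuda.empty_cache()  # release cached GPU blocks between stages
    # Second heavy stage, also under mixed precision.
    with torch.amp.autocast("cuda"):
        return mesh_stage(refined)
```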