Spaces: Running on Zero

Commit: Trellis loading Bug

README.md CHANGED

@@ -5,7 +5,7 @@ colorFrom: yellow
 colorTo: purple
 sdk: gradio
 python_version: 3.10.13
-sdk_version: 5.
+sdk_version: 5.22.0
 app_file: app.py
 pinned: true
 short_description: Transform Your Images into Mesmerizing Hexagon Grids

app.py CHANGED

@@ -754,6 +754,7 @@ def replace_input_with_sketch_image(sketch_image):
 
 @spaces.GPU(progress=gr.Progress(track_tqdm=True))
 def load_trellis_model():
+    gr.Info("TRELLIS_PIPELINE load start", 60)
     global TRELLIS_PIPELINE
     loaded = False
     if TRELLIS_PIPELINE == None:
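The added gr.Info call passes 60 as the second positional argument, which in Gradio 5.x is the toast duration in seconds, so the "load start" notice stays visible for the whole (slow) pipeline load. A minimal sketch of the same pattern, with a sleep standing in for the real load:

import time
import gradio as gr

def slow_load():
    # long-lived toast: the second argument is the duration in seconds
    gr.Info("Pipeline load start", 60)
    time.sleep(5)          # stand-in for the real model load
    gr.Info("Pipeline loaded.")

with gr.Blocks() as demo:
    gr.Button("Load").click(fn=slow_load)
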
@@ -772,7 +773,9 @@ def load_trellis_model():
         print(f"Error preloading TRELLIS_PIPELINE: {e}")
         gr.Error(f"Failed to load TRELLIS_PIPELINE: {e}")
         TRELLIS_PIPELINE = None
-
+    else:
+        loaded = True
+        print("TRELLIS_PIPELINE already loaded\n")
 
 def load_3d_models(is_open: bool = True) -> bool:
     if is_open:
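load_trellis_model caches the pipeline in a module-level global; the new else branch makes repeat calls report success instead of falling through silently when the pipeline is already resident. A minimal sketch of that lazy-singleton pattern, with build_pipeline as a hypothetical stand-in for the real from_pretrained call:

PIPELINE = None

def build_pipeline():
    return object()          # stand-in for the real, expensive constructor

def load_pipeline() -> bool:
    global PIPELINE
    loaded = False
    if PIPELINE is None:
        try:
            PIPELINE = build_pipeline()
            loaded = True
        except Exception as e:
            print(f"Error preloading pipeline: {e}")
            PIPELINE = None
    else:
        # the branch this commit adds: the pipeline is already loaded
        loaded = True
        print("pipeline already loaded")
    return loaded
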
@@ -781,21 +784,20 @@ def load_3d_models(is_open: bool = True) -> bool:
         image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
         depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)
         print("DPT models loaded\n")
-
-
+        if not constants.IS_SHARED_SPACE:
+            load_trellis_model()
         print("3D models loaded")
         gr.Info("3D models loaded.")
-    #else:
-    #    gr.Error("Failed to load TRELLIS_PIPELINE.")
     return gr.update(interactive = is_open)
 
 def unload_3d_models(is_open: bool = False) -> bool:
     if not is_open:
         gr.Info("Unloading 3D models...")
         global image_processor, depth_model, TRELLIS_PIPELINE
-        if TRELLIS_PIPELINE:
-            TRELLIS_PIPELINE.cpu()
-
+        if not constants.IS_SHARED_SPACE:
+            if TRELLIS_PIPELINE:
+                TRELLIS_PIPELINE.cpu()
+            TRELLIS_PIPELINE = None
         if depth_model:
             del image_processor
             del depth_model
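Both callbacks now branch on constants.IS_SHARED_SPACE, so a shared (ZeroGPU) deployment keeps one preloaded TRELLIS pipeline instead of loading and tearing it down on every accordion toggle, while the DPT depth models are still created and deleted per session. A sketch of that gating with a plain boolean standing in for the Space's constants module:

import gradio as gr

IS_SHARED_SPACE = False   # assumption: True when one preloaded pipeline is shared

def load_3d_models(is_open: bool = True):
    if is_open and not IS_SHARED_SPACE:
        print("loading per-session 3D models...")    # stand-in for the DPT/TRELLIS loads
    # the generate button is only clickable while the accordion is open
    return gr.update(interactive=is_open)

def unload_3d_models(is_open: bool = False):
    if not is_open and not IS_SHARED_SPACE:
        print("unloading per-session 3D models...")
    return gr.update(interactive=is_open)
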
@@ -946,74 +948,79 @@ def generate_3d_asset_part2(depth_img, image_path, output_name, seed, steps, mod
     image_raw = Image.open(image_path).convert("RGB")
     resized_image = resize_image_with_aspect_ratio(image_raw, model_resolution, model_resolution)
     depth_img = Image.open(depth_img).convert("RGBA")
+    if TRELLIS_PIPELINE is None:
+        gr.Warning(f"Trellis Pipeline is not initialized: {TRELLIS_PIPELINE.device()}")
+        return [None, None, depth_img]
+    else:
+        # Preprocess and run the Trellis pipeline with fixed sampler settings
+        try:
+            TRELLIS_PIPELINE.cuda()
+            processed_image = TRELLIS_PIPELINE.preprocess_image(resized_image, max_resolution=model_resolution)
+            outputs = TRELLIS_PIPELINE.run(
+                processed_image,
+                seed=seed,
+                formats=["gaussian", "mesh"],
+                preprocess_image=False,
+                sparse_structure_sampler_params={
+                    "steps": steps,
+                    "cfg_strength": 7.5,
+                },
+                slat_sampler_params={
+                    "steps": steps,
+                    "cfg_strength": 3.0,
+                },
+            )
 
+            # Validate the mesh
+            mesh = outputs['mesh'][0]
+            meshisdict = isinstance(mesh, dict)
+            if meshisdict:
+                vertices = mesh['vertices']
+                faces = mesh['faces']
+            else:
+                vertices = mesh.vertices
+                faces = mesh.faces
 
+            print(f"Mesh vertices: {vertices.shape}, faces: {faces.shape}")
+            if faces.max() >= vertices.shape[0]:
+                raise ValueError(f"Invalid mesh: face index {faces.max()} exceeds vertex count {vertices.shape[0]}")
+        except Exception as e:
+            gr.Warning(f"Error generating 3D asset: {e}")
+            print(f"Error generating 3D asset: {e}")
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+            return None,None, depth_img
+
+        # Ensure data is on GPU and has correct type
+        if not vertices.is_cuda or not faces.is_cuda:
+            raise ValueError("Mesh data must be on GPU")
+        if vertices.dtype != torch.float32 or faces.dtype != torch.int32:
+            if meshisdict:
+                mesh['faces'] = faces.to(torch.int32)
+                mesh['vertices'] = vertices.to(torch.float32)
+            else:
+                mesh.faces = faces.to(torch.int32)
+                mesh.vertices = vertices.to(torch.float32)
 
+        user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
+        os.makedirs(user_dir, exist_ok=True)
 
+        video = render_utils.render_video(outputs['gaussian'][0], resolution=video_resolution, num_frames=64, r=1, fov=45)['color']
+        try:
+            video_geo = render_utils.render_video(outputs['mesh'][0], resolution=video_resolution, num_frames=64, r=1, fov=45)['normal']
+            video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
+        except Exception as e:
+            gr.Info(f"Error rendering video: {e}")
+            print(f"Error rendering video: {e}")
+        video_path = os.path.join(user_dir, f'{output_name}.mp4')
+        imageio.mimsave(video_path, video, fps=8)
 
+        #snapshot_results = render_utils.render_snapshot_depth(outputs['mesh'][0], resolution=1280, r=1, fov=80)
+        #depth_snapshot = Image.fromarray(snapshot_results['normal'][0]).convert("L")
+        depth_snapshot = depth_img
+
+        state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], output_name)
 
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
         torch.cuda.ipc_collect()
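Inside the new else branch the mesh returned by TRELLIS is validated before any rendering: every face index must reference an existing vertex, and vertices/faces are coerced to float32/int32. A standalone sketch of those checks on synthetic tensors rather than TRELLIS output:

import torch

vertices = torch.rand(100, 3)               # float32 by default
faces = torch.randint(0, 100, (200, 3))     # int64 by default

# every face index must reference an existing vertex
if faces.max() >= vertices.shape[0]:
    raise ValueError(f"Invalid mesh: face index {int(faces.max())} exceeds vertex count {vertices.shape[0]}")

# downstream rendering expects float32 vertices and int32 faces
if vertices.dtype != torch.float32 or faces.dtype != torch.int32:
    vertices = vertices.to(torch.float32)
    faces = faces.to(torch.int32)

The preview video pairs each Gaussian color frame with the corresponding mesh normal frame side by side before writing the clip. A sketch with synthetic frames (writing .mp4 through imageio assumes the imageio-ffmpeg plugin is available):

import numpy as np
import imageio

color = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(8)]
normal = [np.full((64, 64, 3), 255, dtype=np.uint8) for _ in range(8)]

# concatenate along the width so color and normals play side by side
frames = [np.concatenate([color[i], normal[i]], axis=1) for i in range(len(color))]
imageio.mimsave("preview.mp4", frames, fps=8)
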
@@ -1595,9 +1602,9 @@ with gr.Blocks(css_paths="style_20250314.css", title=title, theme='Surn/beeuty',
         # outputs=[depth_map_output, model_output, model_file], scroll_to_output=True
         # )
         accordian_3d.expand(
-            fn=load_trellis_model,
-            trigger_mode="always_last"
-        ).then(
+            # fn=load_trellis_model,
+            # trigger_mode="always_last"
+            # ).then(
             fn=load_3d_models,
             trigger_mode="always_last",
             outputs=[generate_3d_asset_button],
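With the chained .then() commented out, the accordion's expand event now triggers load_3d_models directly (which itself calls load_trellis_model when the Space is not shared). A minimal, self-contained sketch of wiring Accordion expand/collapse events to load/unload callbacks; component names here are illustrative:

import gradio as gr

def load_3d_models(is_open: bool = True):
    return gr.update(interactive=is_open)

def unload_3d_models(is_open: bool = False):
    return gr.update(interactive=is_open)

with gr.Blocks() as demo:
    with gr.Accordion("Generate 3D Asset", open=False) as accordian_3d:
        generate_3d_asset_button = gr.Button("Generate 3D Asset", interactive=False)
    # expand/collapse fire with no inputs, so the callbacks rely on their defaults
    accordian_3d.expand(fn=load_3d_models, outputs=[generate_3d_asset_button], trigger_mode="always_last")
    accordian_3d.collapse(fn=unload_3d_models, outputs=[generate_3d_asset_button], trigger_mode="always_last")
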
@@ -1675,13 +1682,13 @@ if __name__ == "__main__":
 
     # image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
     # depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)
+    if constants.IS_SHARED_SPACE:
+        TRELLIS_PIPELINE = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
+        TRELLIS_PIPELINE.to(device)
+        try:
+            TRELLIS_PIPELINE.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))    # Preload rembg
+        except:
+            pass
     hexaGrid.queue(default_concurrency_limit=1,max_size=12,api_open=False)
     hexaGrid.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered", 'e:/TMP'], favicon_path="./assets/favicon.ico", max_file_size="10mb")
 
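For shared Spaces the pipeline is instead loaded once at startup, and preprocess_image is run on a dummy 512x512 image so the rembg background-removal weights are fetched before the first real request; failures are swallowed because the warm-up is best-effort. A sketch of that warm-up trick, with preprocess as a hypothetical stand-in for TRELLIS_PIPELINE.preprocess_image:

import numpy as np
from PIL import Image

def preprocess(img: Image.Image) -> Image.Image:
    # stand-in for the real preprocessing path that pulls in rembg
    return img.convert("RGBA")

try:
    # best-effort warm-up; a failure here should not block app startup
    preprocess(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))
except Exception:
    pass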