# Version: 1.1.3 - Load pipeline at module level for Spaces environment
# Applied targeted fixes:
# - Removed unsupported inputs/outputs kwargs on demo.load/unload
# - Converted NumPy arrays to lists in pack_state for JSON safety
# - Fixed indentation in Blocks event handlers
# - Verified clear() callbacks use only callback + outputs
# - Removed `torch_dtype` arg from from_pretrained
# - Moved pipeline initialization to module level so it's available in threads

import gradio as gr
import spaces
import os
import shutil

# Set before the torch/trellis imports below so tokenizers and spconv pick them up.
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
os.environ['SPCONV_ALGO'] = 'native'

from typing import Tuple

import torch
import numpy as np
import imageio
from easydict import EasyDict as edict
from trellis.pipelines import TrellisTextTo3DPipeline
from trellis.representations import Gaussian, MeshExtractResult
from trellis.utils import render_utils, postprocessing_utils
import traceback
import sys

# --- Global Config ---
MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)
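
# Each session gets its own subdirectory under TMP_DIR (created in start_session,
# removed in end_session); generated preview videos, GLB and PLY files are written there.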

# --- Initialize Trellis Pipeline at import time ---
print("[Startup] Loading Trellis pipeline...")
try:
    pipeline = TrellisTextTo3DPipeline.from_pretrained(
        "JeffreyXiang/TRELLIS-text-xlarge"
    )
    if torch.cuda.is_available():
        pipeline = pipeline.to("cuda")
        print("[Startup] Trellis pipeline loaded to GPU.")
    else:
        print("[Startup] Trellis pipeline loaded to CPU.")
except Exception as e:
    print(f"❌ [Startup] Failed to load Trellis pipeline: {e}")
    raise


def start_session(req: gr.Request):
    """Create a per-session temp directory when a client connects."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)
    print(f"Started session, created directory: {user_dir}")


def end_session(req: gr.Request):
    """Remove the per-session temp directory when a client disconnects."""
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    if os.path.exists(user_dir):
        try:
            shutil.rmtree(user_dir)
            print(f"Ended session, removed directory: {user_dir}")
        except OSError as e:
            print(f"Error removing tmp directory {user_dir}: {e.strerror}", file=sys.stderr)
    else:
        print(f"Ended session, directory already removed: {user_dir}")


def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
    """Packs Gaussian and Mesh data into a JSON-serializable dictionary."""
    return {
        'gaussian': {
            **gs.init_params,
            '_xyz': gs._xyz.detach().cpu().numpy().tolist(),
            '_features_dc': gs._features_dc.detach().cpu().numpy().tolist(),
            '_scaling': gs._scaling.detach().cpu().numpy().tolist(),
            '_rotation': gs._rotation.detach().cpu().numpy().tolist(),
            '_opacity': gs._opacity.detach().cpu().numpy().tolist(),
        },
        'mesh': {
            'vertices': mesh.vertices.detach().cpu().numpy().tolist(),
            'faces': mesh.faces.detach().cpu().numpy().tolist(),
        },
    }


def unpack_state(state_dict: dict) -> Tuple[Gaussian, edict]:
    """Rebuilds the Gaussian and mesh objects from the packed dictionary."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    gd = state_dict['gaussian']
    md = state_dict['mesh']
    gs = Gaussian(
        aabb=gd.get('aabb'), sh_degree=gd.get('sh_degree'),
        mininum_kernel_size=gd.get('mininum_kernel_size'),  # parameter name spelled as in the Gaussian class
        scaling_bias=gd.get('scaling_bias'), opacity_bias=gd.get('opacity_bias'),
        scaling_activation=gd.get('scaling_activation')
    )
    gs._xyz = torch.tensor(np.array(gd['_xyz']), device=device, dtype=torch.float32)
    gs._features_dc = torch.tensor(np.array(gd['_features_dc']), device=device, dtype=torch.float32)
    gs._scaling = torch.tensor(np.array(gd['_scaling']), device=device, dtype=torch.float32)
    gs._rotation = torch.tensor(np.array(gd['_rotation']), device=device, dtype=torch.float32)
    gs._opacity = torch.tensor(np.array(gd['_opacity']), device=device, dtype=torch.float32)
    mesh = edict(
        vertices=torch.tensor(np.array(md['vertices']), device=device, dtype=torch.float32),
        faces=torch.tensor(np.array(md['faces']), device=device, dtype=torch.int64),
    )
    return gs, mesh


def get_seed(randomize_seed: bool, seed: int) -> int:
    """Returns a random seed when randomization is enabled, otherwise the given seed."""
    return int(np.random.randint(0, MAX_SEED) if randomize_seed else seed)
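

# Assumption: this Space runs on ZeroGPU ("Running on Zero"), so the GPU-bound handlers
# below are wrapped with @spaces.GPU to attach a GPU for the duration of each call;
# the decorator is a no-op on non-ZeroGPU hardware.
@spaces.GPU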
def text_to_3d(
    prompt: str, seed: int,
    ss_guidance_strength: float, ss_sampling_steps: int,
    slat_guidance_strength: float, slat_sampling_steps: int,
    req: gr.Request
) -> Tuple[dict, str]:
    """Runs the two-stage TRELLIS pipeline and returns (packed state, preview video path)."""
    out = pipeline.run(
        prompt, seed=seed,
        formats=["gaussian", "mesh"],
        sparse_structure_sampler_params={"steps": ss_sampling_steps, "cfg_strength": ss_guidance_strength},
        slat_sampler_params={"steps": slat_sampling_steps, "cfg_strength": slat_guidance_strength},
    )
    state = pack_state(out['gaussian'][0], out['mesh'][0])
    # Side-by-side preview video: color pass from the Gaussians, normal pass from the mesh.
    vid_color = render_utils.render_video(out['gaussian'][0], num_frames=120)['color']
    vid_normal = render_utils.render_video(out['mesh'][0], num_frames=120)['normal']
    vid = [np.concatenate([c.astype(np.uint8), n.astype(np.uint8)], axis=1) for c, n in zip(vid_color, vid_normal)]
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)
    video_path = os.path.join(user_dir, 'sample.mp4')
    imageio.mimsave(video_path, vid, fps=15, quality=8)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return state, video_path
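

# Same ZeroGPU assumption as above: GLB texture baking runs on the GPU.
@spaces.GPU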
def extract_glb(state_dict: dict, mesh_simplify: float, texture_size: int, req: gr.Request):
    """Converts the packed state into a textured GLB and returns its path twice
    (once for the 3D viewer, once for the download button)."""
    gs, mesh = unpack_state(state_dict)
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)
    glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=True)
    glb_path = os.path.join(user_dir, 'sample.glb')
    glb.export(glb_path)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return glb_path, glb_path
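

# Same ZeroGPU assumption as above; unpack_state restores the Gaussian tensors on the GPU.
@spaces.GPU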
def extract_gaussian(state_dict: dict, req: gr.Request):
    """Saves the packed Gaussian state as a PLY file and returns its path twice
    (once for the 3D viewer, once for the download button)."""
    gs, _ = unpack_state(state_dict)
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(user_dir, exist_ok=True)
    ply_path = os.path.join(user_dir, 'sample.ply')
    gs.save_ply(ply_path)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return ply_path, ply_path


# --- Gradio UI ---
with gr.Blocks(delete_cache=(600, 600), title="TRELLIS Text-to-3D") as demo:
    gr.Markdown("""
    # Text to 3D Asset with TRELLIS
    """)

    # Holds the packed Gaussian/mesh state between generation and extraction.
    output_buf = gr.State()

    with gr.Row():
        with gr.Column(scale=1):
            text_prompt = gr.Textbox(label="Text Prompt", lines=5)
            with gr.Accordion("Generation Settings", open=False):
                seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                gr.Markdown("Stage 1: Sparse Structure Generation")
                ss_guidance_strength = gr.Slider(0.0, 15.0, label="Guidance Strength", value=7.5, step=0.1)
                ss_sampling_steps = gr.Slider(10, 50, label="Steps", value=25, step=1)
                gr.Markdown("Stage 2: Structured Latent Generation")
                slat_guidance_strength = gr.Slider(0.0, 15.0, label="Guidance Strength", value=7.5, step=0.1)
                slat_sampling_steps = gr.Slider(10, 50, label="Steps", value=25, step=1)
            generate_btn = gr.Button("Generate 3D Preview")
            with gr.Accordion("GLB Extraction Settings", open=True):
                mesh_simplify = gr.Slider(0.9, 0.99, label="Simplify", value=0.95, step=0.01)
                texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
            extract_glb_btn = gr.Button("Extract GLB", interactive=False)
            extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
            download_glb = gr.DownloadButton("Download GLB", interactive=False)
            download_gs = gr.DownloadButton("Download Gaussian", interactive=False)
        with gr.Column(scale=1):
            video_output = gr.Video(autoplay=True, loop=True)
            model_output = gr.Model3D()

    # --- Handlers ---
    demo.load(start_session)
    demo.unload(end_session)

    generate_btn.click(
        get_seed,
        inputs=[randomize_seed, seed], outputs=[seed]
    ).then(
        text_to_3d,
        inputs=[text_prompt, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps],
        outputs=[output_buf, video_output]
    ).then(
        # Enable extraction once a preview exists (gr.update instead of the removed Component.update).
        lambda: (gr.update(interactive=True), gr.update(interactive=True)),
        outputs=[extract_glb_btn, extract_gs_btn]
    )

    extract_glb_btn.click(
        extract_glb,
        inputs=[output_buf, mesh_simplify, texture_size],
        outputs=[model_output, download_glb]
    ).then(lambda: gr.update(interactive=True), outputs=[download_glb])

    extract_gs_btn.click(
        extract_gaussian,
        inputs=[output_buf], outputs=[model_output, download_gs]
    ).then(lambda: gr.update(interactive=True), outputs=[download_gs])

    # Disable extraction/download controls again when the outputs are cleared.
    model_output.clear(
        lambda: (gr.update(interactive=False), gr.update(interactive=False)),
        outputs=[download_glb, download_gs]
    )
    video_output.clear(
        lambda: tuple(gr.update(interactive=False) for _ in range(4)),
        outputs=[extract_glb_btn, extract_gs_btn, download_glb, download_gs]
    )


if __name__ == "__main__":
    demo.queue().launch(debug=True)