Update app.py
app.py CHANGED
@@ -76,55 +76,75 @@ def normalize_point_clouds(pcs, mode):

Before (lines 76-130):

 def predict(Seed, ckpt):
     if Seed is None:
         Seed = 777
-    seed_all(int(Seed))
-
-    #
-    #
-
-
-
-
-        print("
-        model_type = 'gaussian'
-        latent_dim = ckpt.get('latent_dim', 128) # A common default
-        flexibility = ckpt.get('flexibility', 0.0) # A common default
     else:
-
-
-
-
-
-
-
-
-        #
-
-
-
-
             'latent_dim': latent_dim,
-            '
-            '
-            '
         })()
-
     else:
-        raise ValueError(f"Unknown model type: {

     model.load_state_dict(ckpt['state_dict'])
-    model.eval()

-    # Generate Point Clouds
     gen_pcs = []
     with torch.no_grad():
-        z = torch.randn([1, latent_dim]).to(device)
-
-        num_points_to_generate = getattr(ckpt.get('args', {}), 'num_points', 2048) # Default to 2048 if not in args
-        x = model.sample(z, num_points_to_generate, flexibility=flexibility)
         gen_pcs.append(x.detach().cpu())
-
-    gen_pcs_tensor = torch.cat(gen_pcs, dim=0)[:1]
-    gen_pcs_normalized = normalize_point_clouds(gen_pcs_tensor.clone(), mode="shape_bbox")

     return gen_pcs_normalized[0]
After (lines 76-150):

 def predict(Seed, ckpt):
     if Seed is None:
         Seed = 777
+    seed_all(int(Seed))
+
+    # --- MODIFICATION START ---
+    # Try to get the original args from the checkpoint first
+    # The key might be 'args', 'config', or something similar.
+    # We need to inspect the actual keys of a loaded ckpt if this doesn't work.
+    if 'args' in ckpt and hasattr(ckpt['args'], 'model'):
+        actual_args = ckpt['args']
+        print("Using 'args' found in checkpoint.")
     else:
+        # Fallback to constructing a mock_args if 'args' is not as expected
+        # This part needs to be more robust and include all necessary defaults
+        print("Warning: 'args' not found or 'args.model' missing in checkpoint. Constructing mock_args.")
+
+        # Defaults - these might need to be adjusted based on the original training scripts
+        # or by inspecting a correctly loaded checkpoint from the original repo.
+        default_latent_dim = 128
+        default_hyper = None # Or some sensible default if PointwiseNet/etc. need it
+        default_residual = True # Common default for PointwiseNet, but needs verification
+        default_flow_depth = 10
+        default_flow_hidden_dim = 256
+        default_model_type = 'gaussian' # Default if not found
+        default_num_points = 2048
+        default_flexibility = 0.0
+
+        # Try to get values from ckpt if they exist at the top level
+        # (some checkpoints might store them flatly instead of under an 'args' key)
+        model_type = ckpt.get('model', default_model_type) # Check if 'model' key exists directly
+        latent_dim = ckpt.get('latent_dim', default_latent_dim)
+        hyper = ckpt.get('hyper', default_hyper)
+        residual = ckpt.get('residual', default_residual)
+        flow_depth = ckpt.get('flow_depth', default_flow_depth)
+        flow_hidden_dim = ckpt.get('flow_hidden_dim', default_flow_hidden_dim)
+        num_points_to_generate = ckpt.get('num_points', default_num_points)
+        flexibility = ckpt.get('flexibility', default_flexibility)
+
+        # Create the mock_args object
+        actual_args = type('Args', (), {
+            'model': model_type,
             'latent_dim': latent_dim,
+            'hyper': hyper,
+            'residual': residual, # Added residual
+            'flow_depth': flow_depth,
+            'flow_hidden_dim': flow_hidden_dim,
+            'num_points': num_points_to_generate,
+            'flexibility': flexibility
+            # Add any other attributes that models might expect from 'args'
         })()
+    # --- MODIFICATION END ---
+
+    # Now use actual_args to instantiate models
+    if actual_args.model == 'gaussian':
+        model = GaussianVAE(actual_args).to(device)
+    elif actual_args.model == 'flow':
+        model = FlowVAE(actual_args).to(device)
     else:
+        raise ValueError(f"Unknown model type: {actual_args.model}")

     model.load_state_dict(ckpt['state_dict'])
+    model.eval()

     gen_pcs = []
     with torch.no_grad():
+        z = torch.randn([1, actual_args.latent_dim]).to(device)
+        x = model.sample(z, actual_args.num_points, flexibility=actual_args.flexibility)
         gen_pcs.append(x.detach().cpu())
+
+    gen_pcs_tensor = torch.cat(gen_pcs, dim=0)[:1]
+    gen_pcs_normalized = normalize_point_clouds(gen_pcs_tensor.clone(), mode="shape_bbox")

     return gen_pcs_normalized[0]
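The new branch assumes the checkpoint dictionary carries its training configuration under an 'args' key, which is exactly what the comment "We need to inspect the actual keys of a loaded ckpt" suggests verifying. A minimal inspection sketch, assuming the checkpoint is an ordinary torch.save() dictionary; the file name is a placeholder and the key layout depends on how the checkpoint was saved:

import torch

ckpt = torch.load("checkpoint.pt", map_location="cpu")  # placeholder path
print(sorted(ckpt.keys()))  # e.g. ['args', 'state_dict', ...]
if 'args' in ckpt:
    args = ckpt['args']
    # argparse.Namespace-style objects expose their fields via vars()
    print(vars(args) if hasattr(args, '__dict__') else args)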
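The fallback branch builds actual_args with the type('Args', (), {...})() idiom. A sketch of an equivalent construction using types.SimpleNamespace from the standard library, which behaves the same for plain attribute reads; the field names and defaults mirror the ones the fallback branch above assumes the models need:

from types import SimpleNamespace

# Illustrative only; same defaults as the fallback branch in the diff.
actual_args = SimpleNamespace(
    model='gaussian',
    latent_dim=128,
    hyper=None,
    residual=True,
    flow_depth=10,
    flow_hidden_dim=256,
    num_points=2048,
    flexibility=0.0,
)
print(actual_args.model, actual_args.latent_dim)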
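A usage sketch for the updated function, assuming predict, device, and the model classes are defined as in app.py; the checkpoint path is a placeholder:

import torch

ckpt = torch.load("checkpoint.pt", map_location="cpu")  # placeholder path
pc = predict(Seed=777, ckpt=ckpt)  # one generated point cloud, normalized with mode="shape_bbox"
print(pc.shape)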