Update app.py
app.py CHANGED
@@ -5,14 +5,19 @@ import gradio as gr
 import numpy as np
 from PIL import Image, ImageDraw
 import torchvision.transforms.functional as TF
 from matplotlib import colormaps
 from transformers import AutoModel
 
 # ----------------------------
 # Configuration
 # ----------------------------
-# …
-MODEL_ID = …
+# ✅ Define available models, with the smaller one as default
+MODELS = {
+    "DINOv3 ViT-S+ (Small, Default)": "facebook/dinov3-vits16plus-pretrain-lvd1689m",
+    "DINOv3 ViT-H+ (Huge)": "facebook/dinov3-vith16plus-pretrain-lvd1689m",
+}
+DEFAULT_MODEL_NAME = "DINOv3 ViT-S+ (Small, Default)"
+
 PATCH_SIZE = 16
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -20,41 +25,41 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 IMAGENET_MEAN = (0.485, 0.456, 0.406)
 IMAGENET_STD = (0.229, 0.224, 0.225)
 
+# ✅ Cache for loaded models to avoid re-downloading
+model_cache = {}
+
 # ----------------------------
 # Model Loading (Hugging Face Hub)
 # ----------------------------
-def load_model_from_hub():
-    """Loads the DINOv3 model from the Hugging Face Hub."""
-    print(f"Loading model '{MODEL_ID}' from Hugging Face Hub...")
+def load_model_from_hub(model_id: str):
+    """Loads a DINOv3 model from the Hugging Face Hub."""
+    print(f"Loading model '{model_id}' from Hugging Face Hub...")
     try:
-        # This will use the HF_TOKEN secret if you set it in your Space settings.
         token = os.environ.get("HF_TOKEN")
-
-        model = AutoModel.from_pretrained(MODEL_ID, token=token, trust_remote_code=True)
+        model = AutoModel.from_pretrained(model_id, token=token, trust_remote_code=True)
         model.to(DEVICE).eval()
         print(f"✅ Model loaded successfully on device: {DEVICE}")
         return model
     except Exception as e:
         print(f"❌ Failed to load model: {e}")
-        # This will display a clear error message in the Gradio interface
         raise gr.Error(
-            f"Could not load model '{MODEL_ID}'. "
+            f"Could not load model '{model_id}'. "
             "This is a gated model. Please ensure you have accepted the terms on its Hugging Face page "
             "and set your HF_TOKEN as a secret in your Space settings. "
             f"Original error: {e}"
         )
 
-
-model = load_model_from_hub()
+def get_model(model_name: str):
+    """Gets a model from the cache or loads it if not present."""
+    model_id = MODELS[model_name]
+    if model_id not in model_cache:
+        model_cache[model_id] = load_model_from_hub(model_id)
+    return model_cache[model_id]
 
 # ----------------------------
-# Helper Functions (resize, viz)
+# Helper Functions (resize, viz) - No changes here
 # ----------------------------
 def resize_to_grid(img: Image.Image, long_side: int, patch: int) -> torch.Tensor:
-    """
-    Resizes so max(h,w)=long_side (keeping aspect), then rounds each side UP to a multiple of 'patch'.
-    Returns CHW float tensor in [0,1].
-    """
     w, h = img.size
     scale = long_side / max(h, w)
     new_h = max(patch, int(round(h * scale)))
@@ -110,7 +115,8 @@ def patch_neighborhood_box(r: int, c: int, Hp: int, Wp: int, rad: int, patch: int):
 # Feature Extraction (using transformers)
 # ----------------------------
 @torch.inference_mode()
-def extract_image_features(image_pil: Image.Image, target_long_side: int):
+# ✅ Pass the model object as an argument
+def extract_image_features(model, image_pil: Image.Image, target_long_side: int):
     """
     Extracts patch features from an image using the loaded Hugging Face model.
     """
@@ -118,23 +124,19 @@ def extract_image_features(image_pil: Image.Image, target_long_side: int):
     t_norm = TF.normalize(t, IMAGENET_MEAN, IMAGENET_STD).unsqueeze(0).to(DEVICE)
     _, _, H, W = t_norm.shape
     Hp, Wp = H // PATCH_SIZE, W // PATCH_SIZE
-
-    # 💡 Use the standard forward pass of the transformers model
+
     outputs = model(t_norm)
-
-    # …
-    # We must skip all 5 to get only the patch embeddings.
-    n_special_tokens = 5
+
+    n_special_tokens = 5
     patch_embeddings = outputs.last_hidden_state.squeeze(0)[n_special_tokens:, :]
-
-    # L2-normalize the features to prepare for cosine similarity
+
     X = F.normalize(patch_embeddings, p=2, dim=-1)
 
     img_resized = TF.to_pil_image(t)
     return {"X": X, "Hp": Hp, "Wp": Wp, "img": img_resized}
 
 # ----------------------------
-# Similarity inside the same image
+# Similarity inside the same image - No changes here
 # ----------------------------
 def click_to_similarity_in_same_image(
     state: dict,
@@ -206,15 +208,21 @@ def click_to_similarity_in_same_image(
 # ----------------------------
 with gr.Blocks(theme=gr.themes.Soft(), title="DINOv3 Single-Image Patch Similarity") as demo:
     gr.Markdown("# 🦖 DINOv3 Single-Image Patch Similarity")
-    gr.Markdown("## Running on CPU-only Space, feature extraction …")
-    gr.Markdown("1. Upload an image. …")
+    gr.Markdown("## Running on CPU-only Space, feature extraction can take a moment")
+    gr.Markdown("1. **Choose a model**. 2. Upload an image. 3. Click **Process Image**. 4. **Click anywhere on the processed image** to find similar regions.")
 
     app_state = gr.State()
 
     with gr.Row():
         with gr.Column(scale=1):
+            # ✅ ADDED MODEL DROPDOWN
+            model_name_dd = gr.Dropdown(
+                label="1. Choose a Model",
+                choices=list(MODELS.keys()),
+                value=DEFAULT_MODEL_NAME,
+            )
             input_image = gr.Image(
-                label="…",
+                label="2. Upload Image",
                 type="pil",
                 value="https://images.squarespace-cdn.com/content/v1/607f89e638219e13eee71b1e/1684821560422-SD5V37BAG28BURTLIXUQ/michael-sum-LEpfefQf4rU-unsplash.jpg"
             )
@@ -223,9 +231,8 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DINOv3 Single-Image Patch Similarity") as demo:
             label="Processing Resolution",
             info="Higher values = more detail but slower processing",
         )
-        process_button = …
-
-
+        process_button = gr.Button("3. Process Image", variant="primary")
+
     with gr.Row():
         alpha = gr.Slider(0.0, 1.0, value=0.55, step=0.05, label="Overlay opacity")
         cmap = gr.Dropdown(
@@ -238,19 +245,24 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DINOv3 Single-Image Patch Similarity") as demo:
         box_radius = gr.Slider(0, 10, value=1, step=1, label="Box radius (patches)")
 
     with gr.Row():
-        marked_image = gr.Image(label="…", interactive=True)
+        marked_image = gr.Image(label="4. Click on this image", interactive=True)
         heatmap_output = gr.Image(label="Similarity heatmap", interactive=False)
     with gr.Row():
         overlay_output = gr.Image(label="Overlay (image + heatmap)", interactive=False)
         overlay_boxes_output = gr.Image(label="Overlay + top-K similar patch boxes", interactive=False)
 
-    # …
-    def _process_image(img: Image.Image, long_side: int, progress=gr.Progress(track_tqdm=True)):
+    # ✅ UPDATED to take model_name as input
+    def _process_image(model_name: str, img: Image.Image, long_side: int, progress=gr.Progress(track_tqdm=True)):
         if img is None:
             gr.Warning("Please upload an image first!")
             return None, None
-
-        st = extract_image_features(img, int(long_side))
+
+        progress(0, desc=f"Loading model '{model_name}'...")
+        model = get_model(model_name)
+
+        progress(0.5, desc="Extracting features...")
+        st = extract_image_features(model, img, int(long_side))
+
         progress(1, desc="Done! You can now click on the image.")
         return st["img"], st
 
@@ -264,18 +276,16 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DINOv3 Single-Image Patch Similarity") as demo:
             box_radius_patches=int(box_rad),
         )
 
-    # …
-    inputs_for_processing = [input_image, target_long_side]
+    # ✅ UPDATED EVENT WIRING to include the dropdown
+    inputs_for_processing = [model_name_dd, input_image, target_long_side]
     outputs_for_processing = [marked_image, app_state]
-
-    # The button now triggers the main processing function
+
     process_button.click(
-        _process_image,
-        inputs=inputs_for_processing,
+        _process_image,
+        inputs=inputs_for_processing,
         outputs=outputs_for_processing
     )
-
-    # The click event on the image remains the same
+
     marked_image.select(
         _on_click,
         inputs=[app_state, alpha, cmap, exclude_r, topk, box_radius],
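A few reviewer notes on the changes above.

On the new caching: get_model keys model_cache by model_id, so each checkpoint is downloaded at most once per Space lifetime, and flipping the dropdown back and forth reuses the already-loaded weights. A minimal sketch of that behavior, with a stub loader standing in for load_model_from_hub() (fake_load is illustrative, not part of app.py):

MODELS = {
    "DINOv3 ViT-S+ (Small, Default)": "facebook/dinov3-vits16plus-pretrain-lvd1689m",
    "DINOv3 ViT-H+ (Huge)": "facebook/dinov3-vith16plus-pretrain-lvd1689m",
}
model_cache = {}

def fake_load(model_id: str) -> str:
    print(f"(expensive download for {model_id})")   # happens once per id
    return f"model<{model_id}>"

def get_model(model_name: str):
    model_id = MODELS[model_name]
    if model_id not in model_cache:                 # cache miss: load once
        model_cache[model_id] = fake_load(model_id)
    return model_cache[model_id]                    # cache hit: reuse

get_model("DINOv3 ViT-S+ (Small, Default)")  # prints the download line
get_model("DINOv3 ViT-S+ (Small, Default)")  # silent: served from cache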
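On the patch grid the feature code relies on: resize_to_grid scales the image so its long side matches the slider value, then rounds each side up to a multiple of PATCH_SIZE (this is what the removed docstring described), so H // PATCH_SIZE and W // PATCH_SIZE tile the image exactly. A small sketch of the rounding, with an illustrative helper name:

import math

def grid_side(side_px: int, scale: float, patch: int = 16) -> int:
    # Scale one side, then round UP to the next multiple of `patch`.
    scaled = max(patch, int(round(side_px * scale)))
    return math.ceil(scaled / patch) * patch

# A 500x375 image at long_side=512 becomes 512x384: both multiples of 16,
# giving a 32x24 patch grid.
scale = 512 / 500
print(grid_side(500, scale), grid_side(375, scale))  # -> 512 384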
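On n_special_tokens = 5: the removed comment says all five special tokens must be skipped; for DINOv3 checkpoints that is one [CLS] token plus four register tokens ahead of the patch tokens. A quick local sanity check (a sketch, not part of the app; it downloads the gated small checkpoint, so your token must be available, e.g. via the HF_TOKEN environment variable, and requires a transformers version with DINOv3 support):

import torch
from transformers import AutoModel

model_id = "facebook/dinov3-vits16plus-pretrain-lvd1689m"
model = AutoModel.from_pretrained(model_id).eval()

H = W = 224                      # any multiple of the 16-pixel patch size
Hp, Wp = H // 16, W // 16
with torch.inference_mode():
    out = model(torch.zeros(1, 3, H, W))

# Expect 5 special tokens followed by one token per patch.
assert out.last_hidden_state.shape[1] == 5 + Hp * Wp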
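Finally, for context on what _on_click computes from the cached state (its body is outside this diff): because X is L2-normalized, a single matrix-vector product yields the cosine similarity of the clicked patch against every patch, which is then reshaped to the Hp x Wp grid for the heatmap. A minimal sketch of that lookup; the function name and signature are illustrative, not taken from app.py (in the app, Gradio's gr.SelectData supplies the clicked pixel):

import torch

def similarity_map(X: torch.Tensor, Hp: int, Wp: int,
                   x_px: int, y_px: int, patch: int = 16) -> torch.Tensor:
    # X: (Hp*Wp, D) L2-normalized patch features; click at pixel (x_px, y_px).
    r, c = y_px // patch, x_px // patch   # pixel coords -> patch grid cell
    q = X[r * Wp + c]                     # query patch embedding
    sims = X @ q                          # cosine similarity (rows are unit norm)
    return sims.reshape(Hp, Wp)           # heatmap over the patch grid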