# NOTE: removed non-Python residue from a web-page export (Spaces header,
# commit hashes, and line-number gutter) so the file parses as Python.
import os
import uuid
import tempfile
import gradio as gr
from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams, Distance, PointStruct
from sentence_transformers import SentenceTransformer
from PIL import Image
import torch
import numpy as np
# --------------------------
# Qdrant Cloud Connection
# --------------------------
# SECURITY NOTE(review): credentials were hard-coded; they are kept only as a
# fallback so existing deployments keep working. Prefer setting the
# QDRANT_URL / QDRANT_API_KEY environment variables and rotating this key.
QDRANT_URL = os.environ.get(
    "QDRANT_URL",
    "https://ff4da494-27b1-413c-ba58-d5ea14932fe1.europe-west3-0.gcp.cloud.qdrant.io",
)
QDRANT_API_KEY = os.environ.get(
    "QDRANT_API_KEY",
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIn0.98XRKd7ZdDXSfYDl44zbZ_VZ5csnh4tz1JACP62KZds",
)
COLLECTION_NAME = "lost_and_found"

# CLIP model (text + image embeddings share one vector space, so text queries
# can match stored image vectors and vice versa).
MODEL_NAME = "sentence-transformers/clip-ViT-B-32"
embedder = SentenceTransformer(MODEL_NAME)
VECTOR_SIZE = embedder.get_sentence_embedding_dimension()

# Qdrant Client (Cloud)
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)

# Create the collection only if missing. The previous recreate_collection()
# call DROPPED every stored item on each app restart — a data-loss bug.
if not qclient.collection_exists(COLLECTION_NAME):
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )
# --------------------------
# Helper Functions
# --------------------------
def embed_text(text: str):
    """Encode *text* with the CLIP model and return the vector as a plain list."""
    vector = embedder.encode(text)
    return vector.tolist()
def embed_image(image: Image.Image):
    """Encode a PIL image with the CLIP model and return the vector as a plain list."""
    embedding = embedder.encode(image, convert_to_tensor=True)
    return embedding.detach().cpu().numpy().tolist()
# --------------------------
# Core Functions
# --------------------------
def add_item(description, image):
    """Store a found item in Qdrant.

    Args:
        description: Free-text description of the item (may be empty).
        image: Optional PIL image of the item; ``None`` when not provided.

    Returns:
        A status string for the Gradio UI.

    If both inputs are given, the image embedding overrides the text one
    (they live in the same CLIP vector space, so either is searchable).
    """
    if not description and image is None:
        return "⚠️ Please provide description or image."
    payload = {"description": description}
    if description:
        vectors = embed_text(description)
    # BUGFIX: use an explicit None check — truthiness of a PIL Image is
    # unreliable and has raised exceptions for some image modes/versions.
    if image is not None:
        vectors = embed_image(image)
        # Persist the upload so search results can display it later.
        # delete=False keeps the file alive after the handler returns.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            image.save(tmp.name)
        payload["image_path"] = tmp.name
    point = PointStruct(
        id=str(uuid.uuid4()),
        vector=vectors,
        payload=payload,
    )
    qclient.upsert(collection_name=COLLECTION_NAME, points=[point])
    return "✅ Item added successfully!"
def search_items(query_text, query_image, max_results, min_score):
    """Search stored items by text or image similarity.

    Args:
        query_text: Optional search phrase; takes priority over the image.
        query_image: Optional PIL query image; ``None`` when not provided.
        max_results: Maximum number of hits to return.
        min_score: Cosine-similarity threshold below which hits are dropped.

    Returns:
        A list of ``(image_path_or_None, caption)`` tuples for the Gallery,
        or a single-message list when input is missing / nothing matched.
    """
    if query_text:
        query_vector = embed_text(query_text)
    # BUGFIX: explicit None check — PIL Image truthiness is unreliable.
    elif query_image is not None:
        query_vector = embed_image(query_image)
    else:
        return ["⚠️ Provide text or image to search."]
    hits = qclient.search(
        collection_name=COLLECTION_NAME,
        query_vector=query_vector,
        limit=max_results,
        score_threshold=min_score,
    )
    if not hits:
        return ["No matches found."]
    outputs = []
    for hit in hits:
        caption = (
            f"{hit.payload.get('description', 'No description')} "
            f"(score: {round(hit.score, 3)})"
        )
        # image_path is absent for text-only items; Gallery accepts None.
        outputs.append((hit.payload.get("image_path", None), caption))
    return outputs
# --------------------------
# Gradio UI
# --------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # --- Tab 1: register a found item -----------------------------------
    with gr.Tab("➕ Add Found Item"):
        with gr.Row():
            desc_in = gr.Textbox(label="Item Description")
            img_in = gr.Image(type="pil", label="Upload Image")
        add_btn = gr.Button("Add Item")
        add_out = gr.Textbox(label="Status")
        add_btn.click(fn=add_item, inputs=[desc_in, img_in], outputs=add_out)
    # --- Tab 2: search for a lost item ----------------------------------
    with gr.Tab("🔍 Search Lost Item"):
        query_text = gr.Textbox(label="Search by Text (optional)")
        query_img = gr.Image(type="pil", label="Search by Image (optional)")
        max_results = gr.Slider(1, 20, step=1, value=5, label="Max Results")
        min_score = gr.Slider(0.0, 1.0, step=0.01, value=0.3, label="Min Similarity Score")
        search_btn = gr.Button("Search")
        # BUGFIX: Gallery.style() was removed in Gradio 4.x and raised at
        # startup ("Runtime error"); layout is now set via constructor args.
        results_out = gr.Gallery(label="Search Results", columns=2, height="auto")
        search_btn.click(
            fn=search_items,
            inputs=[query_text, query_img, max_results, min_score],
            outputs=results_out,
        )
    # --- Tab 3: placeholder for admin tooling ---------------------------
    with gr.Tab("⚙️ Admin"):
        gr.Markdown("Admin dashboard (future expansion).")
# --------------------------
# Run App
# --------------------------
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 (the convention for Spaces/containers).
    demo.launch(server_name="0.0.0.0", server_port=7860)