# lostfound-hack / app.py
import os
import uuid
import tempfile

import gradio as gr
from PIL import Image
from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams, Distance, PointStruct
from sentence_transformers import SentenceTransformer
# --------------------------
# Qdrant Cloud Connection
# --------------------------
# Prefer environment variables for credentials; the literals below are the
# Space's original values, kept only as fallbacks.
QDRANT_URL = os.environ.get(
    "QDRANT_URL",
    "https://ff4da494-27b1-413c-ba58-d5ea14932fe1.europe-west3-0.gcp.cloud.qdrant.io",
)
QDRANT_API_KEY = os.environ.get(
    "QDRANT_API_KEY",
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIn0.98XRKd7ZdDXSfYDl44zbZ_VZ5csnh4tz1JACP62KZds",
)
COLLECTION_NAME = "lost_and_found"
# CLIP model (text + image embeddings)
MODEL_NAME = "sentence-transformers/clip-ViT-B-32"
embedder = SentenceTransformer(MODEL_NAME)
VECTOR_SIZE = embedder.get_sentence_embedding_dimension()
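# clip-ViT-B-32 maps text and images into one shared 512-dimensional space,
# which is why a single Qdrant collection can serve both query modalities.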
# Qdrant Client (Cloud)
qclient = QdrantClient(
    url=QDRANT_URL,
    api_key=QDRANT_API_KEY,
)
# Ensure the collection exists without wiping it on every restart
# (recreate_collection drops all previously stored points).
if not qclient.collection_exists(COLLECTION_NAME):
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )
# --------------------------
# Helper Functions
# --------------------------
def embed_text(text: str):
    """Generate a CLIP embedding for text."""
    return embedder.encode(text).tolist()


def embed_image(image: Image.Image):
    """Generate a CLIP embedding for an image."""
    # SentenceTransformer's CLIP wrapper accepts PIL images directly;
    # no tensor round-trip is needed.
    return embedder.encode(image).tolist()
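# Both helpers return plain Python lists of length VECTOR_SIZE, so text- and
# image-derived points are interchangeable in the same collection, e.g.
# (illustrative values and filename only):
#   embed_text("black leather wallet")     # -> [0.01, -0.23, ...]
#   embed_image(Image.open("wallet.png"))  # -> list of the same length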
# --------------------------
# Core Functions
# --------------------------
def add_item(description, image):
    """Add a found item to Qdrant."""
    if not description and image is None:
        return "⚠️ Please provide a description or an image."
    payload = {"description": description}
    vector = None
    if description:
        vector = embed_text(description)
    if image is not None:
        # CLIP embeds text and images into the same space; when both are
        # given, the image embedding takes precedence.
        vector = embed_image(image)
        # Persist the uploaded image so search results can display it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            image.save(tmp.name)
        payload["image_path"] = tmp.name
    point = PointStruct(
        id=str(uuid.uuid4()),
        vector=vector,
        payload=payload,
    )
    qclient.upsert(collection_name=COLLECTION_NAME, points=[point])
    return "✅ Item added successfully!"
def search_items(query_text, query_image, max_results, min_score):
    """Search lost items by text or image."""
    if query_text:
        vector = embed_text(query_text)
    elif query_image is not None:
        vector = embed_image(query_image)
    else:
        # Raise instead of returning a bare string: gr.Gallery cannot
        # render plain text, but gr.Error surfaces as a modal.
        raise gr.Error("Provide text or an image to search.")
    results = qclient.search(
        collection_name=COLLECTION_NAME,
        query_vector=vector,
        limit=int(max_results),
        score_threshold=min_score,
    )
    outputs = []
    for r in results:
        desc = r.payload.get("description", "No description")
        img = r.payload.get("image_path")
        score = round(r.score, 3)
        # The Gallery needs an image per entry; skip matches whose image
        # file is missing rather than passing None.
        if img and os.path.exists(img):
            outputs.append((img, f"{desc} (score: {score})"))
    if not outputs:
        gr.Warning("No matches found.")
    return outputs
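# Hypothetical admin helper for the "Admin" tab below, a minimal sketch that
# is not wired into the UI: drops and recreates the collection on demand,
# removing every stored item.
def clear_all_items():
    """Delete and recreate the collection, removing all stored items."""
    qclient.delete_collection(collection_name=COLLECTION_NAME)
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )
    return "All items cleared."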
# --------------------------
# Gradio UI
# --------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Tab("➕ Add Found Item"):
        with gr.Row():
            desc_in = gr.Textbox(label="Item Description")
            img_in = gr.Image(type="pil", label="Upload Image")
        add_btn = gr.Button("Add Item")
        add_out = gr.Textbox(label="Status")
        add_btn.click(fn=add_item, inputs=[desc_in, img_in], outputs=add_out)

    with gr.Tab("🔍 Search Lost Item"):
        query_text = gr.Textbox(label="Search by Text (optional)")
        query_img = gr.Image(type="pil", label="Search by Image (optional)")
        max_results = gr.Slider(1, 20, step=1, value=5, label="Max Results")
        min_score = gr.Slider(0.0, 1.0, step=0.01, value=0.3, label="Min Similarity Score")
        search_btn = gr.Button("Search")
        # Gallery.style() was removed in Gradio 4; layout options are
        # constructor arguments now.
        results_out = gr.Gallery(label="Search Results", columns=2, height="auto")
        search_btn.click(
            fn=search_items,
            inputs=[query_text, query_img, max_results, min_score],
            outputs=results_out,
        )

    with gr.Tab("⚙️ Admin"):
        gr.Markdown("Admin dashboard (future expansion).")
# --------------------------
# Run App
# --------------------------
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)