# lostfound-hack / app.py
# Lost & Found database: Gradio UI backed by Qdrant vector search, using
# CLIP (sentence-transformers) embeddings so items can be matched by
# either text descriptions or example images.
import os
import gradio as gr
from qdrant_client import QdrantClient
from qdrant_client.http import models
from sentence_transformers import SentenceTransformer
from PIL import Image
from dotenv import load_dotenv
# Load environment variables from a local .env file (no-op if absent).
load_dotenv()

# Read Qdrant Cloud credentials by NAME from the environment.
# NOTE: the original code passed the literal URL / API key *values* to
# os.getenv(), which both leaked live secrets into source control and
# always returned None (os.getenv expects the variable name, not its
# value), so the credential check below could never pass.
QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
COLLECTION_NAME = "lost_and_found"

# Fail fast with a clear message when deployment secrets are missing.
if not QDRANT_URL or not QDRANT_API_KEY:
    raise RuntimeError(
        "❌ Missing Qdrant Cloud credentials. Please set QDRANT_URL and QDRANT_API_KEY in your environment."
    )

# Initialize the Qdrant client against the cloud cluster.
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)

# CLIP model: embeds text and images into the same 512-d vector space,
# which is what lets text queries match image vectors and vice versa.
model = SentenceTransformer("clip-ViT-B-32")

# Create the collection on first run (ViT-B/32 output is 512-d).
if not qclient.collection_exists(COLLECTION_NAME):
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )
def add_item(image, description):
    """Register a found item: embed its description and photo, store both.

    Args:
        image: Filesystem path to the uploaded photo. gr.Image is declared
            with type="filepath", so the callback receives a path string
            (or None when nothing was uploaded), not a file object.
        description: Free-text description of the item.

    Returns:
        A human-readable status string shown in the Gradio UI.
    """
    import uuid  # stdlib; local import keeps this fix self-contained

    if image is None or not description.strip():
        return "❌ Please provide both an image and a description."

    # Encode both modalities; CLIP places them in a shared vector space so
    # either one can later be used to retrieve this item.
    vector_desc = model.encode(description).tolist()
    vector_img = model.encode(Image.open(image)).tolist()

    # Qdrant requires each point id to be an int or a UUID — the original
    # id=None is rejected by the client. `image` is already a path string,
    # so it is stored directly (the previous `image.name` raised
    # AttributeError on a str).
    payload = {"description": description, "image": image}
    qclient.upsert(
        collection_name=COLLECTION_NAME,
        points=[
            models.PointStruct(id=str(uuid.uuid4()), vector=vector_desc, payload=payload),
            models.PointStruct(id=str(uuid.uuid4()), vector=vector_img, payload=payload),
        ],
    )
    return f"βœ… Item added successfully: {description}"
def search_items(text_query, image_query):
    """Find stored items matching a text query and/or an example image.

    Either or both inputs may be provided; each runs its own top-5 vector
    search and the hits are merged. Returns a list of
    (image_path, description) tuples for the Gradio gallery, or a single
    placeholder entry when nothing matched.
    """
    matches = []

    # Text search: embed the query with the same CLIP model used at insert time.
    if text_query and text_query.strip():
        matches.extend(
            qclient.search(
                collection_name=COLLECTION_NAME,
                query_vector=model.encode(text_query).tolist(),
                limit=5,
            )
        )

    # Image search: embed the uploaded example image the same way.
    if image_query is not None:
        matches.extend(
            qclient.search(
                collection_name=COLLECTION_NAME,
                query_vector=model.encode(Image.open(image_query)).tolist(),
                limit=5,
            )
        )

    # De-duplicate by description while preserving first-seen order
    # (dicts keep insertion order, mirroring the original seen-set logic).
    unique = {}
    for hit in matches:
        desc = hit.payload.get("description")
        if desc not in unique:
            unique[desc] = (hit.payload.get("image"), desc)

    results = list(unique.values())
    return results if results else [("not_found.png", "No match found")]
# Gradio UI
# Two-tab layout: one tab registers found items (image + description),
# the other searches the collection by text and/or an example image.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # πŸ” Lost & Found Database
        Upload found items or search using text/image.
        """
    )
    with gr.Tab("βž• Add Items"):
        with gr.Row():
            with gr.Column():
                # type="filepath" means callbacks receive a path string,
                # not a PIL image or numpy array.
                add_img = gr.Image(type="filepath", label="Upload Image")
                add_desc = gr.Textbox(label="Description", placeholder="e.g. Silver key with round head")
                add_btn = gr.Button("Add Item")
                # Read-only status line populated by add_item's return value.
                add_output = gr.Textbox(label="Status", interactive=False)
        add_btn.click(fn=add_item, inputs=[add_img, add_desc], outputs=add_output)
    with gr.Tab("πŸ”Ž Search"):
        with gr.Row():
            with gr.Column():
                search_text = gr.Textbox(label="Search by Text", placeholder="e.g. key, wallet, phone")
                search_img = gr.Image(type="filepath", label="Or Search by Image")
                search_btn = gr.Button("Search")
        # Gallery consumes search_items' list of (image_path, caption) tuples.
        gallery = gr.Gallery(
            label="Matched Items",
            show_label=True,
            elem_id="gallery",
            columns=[3],
            rows=[2],
            height="auto"
        )
        search_btn.click(fn=search_items, inputs=[search_text, search_img], outputs=gallery)

# Launch app
if __name__ == "__main__":
    demo.launch()