File size: 4,588 Bytes
226d235
7c5c440
7116b90
226d235
c559bc7
294389b
226d235
afa5ea3
109869f
226d235
27514c1
109869f
226d235
 
 
27514c1
109869f
 
 
 
 
 
 
 
 
226d235
 
afa5ea3
226d235
 
 
 
 
 
afa5ea3
226d235
 
 
 
afa5ea3
226d235
 
 
7fae8fb
109869f
666646e
 
 
226d235
 
 
 
 
 
 
 
 
 
 
666646e
afa5ea3
226d235
afa5ea3
226d235
 
 
afa5ea3
226d235
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7c5c440
226d235
 
afa5ea3
226d235
666646e
226d235
47edb75
 
 
226d235
 
47edb75
 
666646e
226d235
666646e
226d235
 
 
 
 
 
 
 
666646e
226d235
 
 
 
 
 
 
 
 
 
 
47edb75
226d235
666646e
226d235
7116b90
226d235
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import os
import uuid

import gradio as gr
from dotenv import load_dotenv
from PIL import Image
from qdrant_client import QdrantClient
from qdrant_client.http import models
from sentence_transformers import SentenceTransformer

# Load environment variables from .env
load_dotenv()

# Get Qdrant Cloud credentials (must be in .env or deployment secrets).
# NOTE: os.getenv takes the *name* of the environment variable. The original
# code passed the literal URL and the literal API key as the lookup names,
# which both always returns None (triggering the RuntimeError below) and
# leaks the secret API key into source control. Never hard-code credentials.
QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_API_KEY = os.getenv("QDRANT_API_KEY")
COLLECTION_NAME = "lost_and_found"

# Fail fast with an actionable message if credentials are missing.
if not QDRANT_URL or not QDRANT_API_KEY:
    raise RuntimeError(
        "❌ Missing Qdrant Cloud credentials. Please set QDRANT_URL and QDRANT_API_KEY in your environment."
    )

# Initialize Qdrant client
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)

# CLIP embeds text and images into the same 512-dim space, which is what
# lets a text query match an image vector and vice versa.
model = SentenceTransformer("clip-ViT-B-32")

# Create collection if it doesn't exist (vector size 512 matches clip-ViT-B-32).
if not qclient.collection_exists(COLLECTION_NAME):
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )

# Add item function
def add_item(image, description):
    """Encode a found item and store it in Qdrant for later retrieval.

    Args:
        image: Filesystem path to the uploaded photo. ``gr.Image`` with
            ``type="filepath"`` delivers a ``str`` path (or ``None`` when
            nothing was uploaded) — not a file object.
        description: Free-text description of the item.

    Returns:
        A status message string shown in the UI.
    """
    if image is None or not description.strip():
        return "❌ Please provide both an image and a description."

    # Encode description and image into the shared CLIP vector space.
    vector_desc = model.encode(description).tolist()
    vector_img = model.encode(Image.open(image)).tolist()

    # Store one point per modality so either a text query or an image query
    # can match this item. Qdrant requires every point to carry a valid id
    # (UUID string or unsigned int) — id=None is rejected by the server.
    qclient.upsert(
        collection_name=COLLECTION_NAME,
        points=[
            models.PointStruct(
                id=str(uuid.uuid4()),
                vector=vector_desc,
                # `image` is already a path string (not a file object, which
                # would need `.name`); store the path so the gallery can
                # display the picture in search results.
                payload={"description": description, "image": image},
            ),
            models.PointStruct(
                id=str(uuid.uuid4()),
                vector=vector_img,
                payload={"description": description, "image": image},
            ),
        ],
    )

    return f"✅ Item added successfully: {description}"

# Search function (text or image)
def search_items(text_query, image_query):
    """Search the collection by text, image, or both.

    Args:
        text_query: Optional free-text query (may be None or blank).
        image_query: Optional filesystem path to a query image
            (``gr.Image(type="filepath")`` passes a str path or None).

    Returns:
        A list of ``(image_path, description)`` tuples for the gallery,
        or a single "not found" placeholder entry when nothing matches.
    """
    search_results = []

    if text_query and text_query.strip():
        vector = model.encode(text_query).tolist()
        hits = qclient.search(
            collection_name=COLLECTION_NAME,
            query_vector=vector,
            limit=5,
        )
        search_results.extend(hits)

    if image_query is not None:
        vector = model.encode(Image.open(image_query)).tolist()
        hits = qclient.search(
            collection_name=COLLECTION_NAME,
            query_vector=vector,
            limit=5,
        )
        search_results.extend(hits)

    # When both queries ran, the concatenated lists are not globally ordered;
    # re-sort by similarity score (cosine: higher is better) so the best
    # matches appear first in the gallery.
    search_results.sort(key=lambda hit: hit.score, reverse=True)

    # Remove duplicates by description (each item is stored twice — once per
    # modality — so the same item can appear in both hit lists).
    seen = set()
    unique_results = []
    for hit in search_results:
        desc = hit.payload.get("description")
        if desc not in seen:
            seen.add(desc)
            unique_results.append(
                (hit.payload.get("image"), hit.payload.get("description"))
            )

    if not unique_results:
        return [("not_found.png", "No match found")]

    return unique_results

# Gradio UI: two tabs — one to register found items, one to search them.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🔍 Lost & Found Database  
        Upload found items or search using text/image.
        """
    )

    # Tab 1: upload an item photo plus a description; add_item stores both
    # embeddings in Qdrant and reports status back in the textbox.
    with gr.Tab("➕ Add Items"):
        with gr.Row():
            with gr.Column():
                # type="filepath" makes the handler receive a str path.
                add_img = gr.Image(type="filepath", label="Upload Image")
                add_desc = gr.Textbox(label="Description", placeholder="e.g. Silver key with round head")
                add_btn = gr.Button("Add Item")
                add_output = gr.Textbox(label="Status", interactive=False)
        add_btn.click(fn=add_item, inputs=[add_img, add_desc], outputs=add_output)

    # Tab 2: query by text and/or image; results render as a gallery of
    # (image, caption) pairs returned by search_items.
    with gr.Tab("🔎 Search"):
        with gr.Row():
            with gr.Column():
                search_text = gr.Textbox(label="Search by Text", placeholder="e.g. key, wallet, phone")
                search_img = gr.Image(type="filepath", label="Or Search by Image")
                search_btn = gr.Button("Search")
        gallery = gr.Gallery(
            label="Matched Items",
            show_label=True,
            elem_id="gallery",
            columns=[3],
            rows=[2],
            height="auto"
        )
        search_btn.click(fn=search_items, inputs=[search_text, search_img], outputs=gallery)

# Launch app only when run as a script (not when imported, e.g. by a
# deployment platform that calls demo itself).
if __name__ == "__main__":
    demo.launch()