import os
import uuid
import gradio as gr
from PIL import Image
from qdrant_client import QdrantClient, models
from transformers import CLIPProcessor, CLIPModel
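# Assumed dependencies (not pinned by this file):
#   pip install gradio pillow qdrant-client transformers torch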

# ==============================
# Setup
# ==============================
UPLOAD_DIR = "uploaded_images"
os.makedirs(UPLOAD_DIR, exist_ok=True)

qclient = QdrantClient(":memory:")
COLLECTION = "lost_and_found"
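# ":memory:" keeps the index in RAM, so all items vanish on restart. For a
# persistent local store, qdrant-client also supports a file-backed mode,
# e.g. QdrantClient(path="qdrant_data").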

# Create the collection only if it is missing (avoids the deprecated recreate_collection)
if not qclient.collection_exists(COLLECTION):
    qclient.create_collection(
        COLLECTION,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )

# Load CLIP
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_proc = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
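
# clip-vit-base-patch32 emits 512-dim embeddings, matching the
# VectorParams(size=512) used when creating the collection above.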


# ==============================
# Encode Function
# ==============================
def encode_data(text=None, image=None):
    """Encode text or a PIL image into CLIP's shared 512-dim embedding space.

    If both text and an image are provided, the text takes precedence.
    """
    if text:
        inputs = clip_proc(text=[text], return_tensors="pt", padding=True)
        return clip_model.get_text_features(**inputs).detach().numpy()[0]

    if image is not None:
        inputs = clip_proc(images=image, return_tensors="pt")
        return clip_model.get_image_features(**inputs).detach().numpy()[0]

    raise ValueError("Need either text or image for encoding")
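
# Quick sanity check (hypothetical usage, not part of the app flow):
#   vec = encode_data(text="black leather wallet")
#   print(vec.shape)  # expected: (512,)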


# ==============================
# Add Item
# ==============================
def add_item(mode, text, image, uploader_name, uploader_phone):
    try:
        img_path = None
        if image is not None:
            img_id = str(uuid.uuid4())
            img_path = os.path.join(UPLOAD_DIR, f"{img_id}.png")
            image.save(img_path)

        # Text takes precedence in encode_data; the image is used as a fallback.
        vector = encode_data(text=text or None, image=image)

        qclient.upsert(
            collection_name=COLLECTION,
            points=[
                models.PointStruct(
                    id=str(uuid.uuid4()),
                    vector=vector.tolist(),
                    payload={
                        "mode": mode,
                        "text": text or "",
                        "uploader_name": uploader_name or "N/A",
                        "uploader_phone": uploader_phone or "N/A",
                        "image_path": img_path,
                        "has_image": bool(image),
                    },
                )
            ],
        )
        return f"βœ… Added successfully as {mode}!"
    except Exception as e:
        return f"❌ Error: {e}"


# ==============================
# Search Items
# ==============================
def search_items(text, image, max_results, min_score):
    try:
        vector = encode_data(text=text or None, image=image)
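        # NOTE: qclient.search() still works, but newer qdrant-client releases
        # deprecate it in favor of query_points(). A minimal sketch, assuming
        # qdrant-client >= 1.10:
        #   results = qclient.query_points(
        #       collection_name=COLLECTION,
        #       query=vector.tolist(),
        #       limit=max_results,
        #       score_threshold=min_score,
        #   ).points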

        results = qclient.search(
            collection_name=COLLECTION,
            query_vector=vector.tolist(),
            limit=max_results,
            score_threshold=min_score,
            with_payload=True,
        )

        texts, imgs = [], []
        for r in results:
            p = r.payload
            desc = (
                f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
            )

            # Always show uploader details
            uploader_name = p.get("uploader_name", "N/A") or "N/A"
            uploader_phone = p.get("uploader_phone", "N/A") or "N/A"
            desc += f" | uploader:{uploader_name} ({uploader_phone})"

            texts.append(desc)
            if p.get("has_image") and "image_path" in p:
                imgs.append(p["image_path"])
        return "\n".join(texts) if texts else "No matches", imgs
    except Exception as e:
        return f"❌ Error: {e}", []


# ==============================
# Delete All
# ==============================
def clear_all():
    # recreate_collection is deprecated in recent qdrant-client releases,
    # so drop and re-create the collection explicitly.
    if qclient.collection_exists(COLLECTION):
        qclient.delete_collection(COLLECTION)
    qclient.create_collection(
        COLLECTION,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )
    return "πŸ—‘οΈ All items cleared."


# ==============================
# Gradio UI
# ==============================
with gr.Blocks() as demo:
    gr.Markdown("# πŸ”‘ Lost & Found Image/Text Search")

    with gr.Tab("βž• Add Item"):
        mode = gr.Radio(["lost", "found"], label="Mode", value="found")
        text_in = gr.Textbox(label="Description (optional)")
        img_in = gr.Image(type="pil", label="Upload Image")
        uploader_name = gr.Textbox(label="Your Name")
        uploader_phone = gr.Textbox(label="Your Phone")
        add_btn = gr.Button("Add to Database")
        add_out = gr.Textbox(label="Status")

        add_btn.click(
            add_item,
            inputs=[mode, text_in, img_in, uploader_name, uploader_phone],
            outputs=add_out,
        )

    with gr.Tab("πŸ” Search"):
        search_text = gr.Textbox(label="Search by text (optional)")
        search_img = gr.Image(type="pil", label="Search by image (optional)")
        max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
        # CLIP text-to-image cosine scores often land well below 0.5, so the
        # floor starts at 0 to keep cross-modal matches reachable.
        min_score = gr.Slider(0.0, 1.0, value=0.2, step=0.01, label="Min similarity threshold")
        search_btn = gr.Button("Search")
        search_out = gr.Textbox(label="Search results (text)")
        search_gallery = gr.Gallery(label="Search Results")

        search_btn.click(
            search_items,
            inputs=[search_text, search_img, max_results, min_score],
            outputs=[search_out, search_gallery],
        )

    with gr.Tab("πŸ—‘οΈ Admin"):
        clear_btn = gr.Button("Clear All Items")
        clear_out = gr.Textbox(label="Status")
        clear_btn.click(clear_all, outputs=clear_out)


# ==============================
# Launch
# ==============================
if __name__ == "__main__":
    demo.launch()
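    # Hypothetical tweak: demo.launch(share=True) asks Gradio for a temporary
    # public URL, handy when testing from another device.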