hackerloi45 committed
Commit 03852d5 · 1 Parent(s): 8f370f4
Files changed (1)
  1. app.py +112 -101
app.py CHANGED
@@ -1,91 +1,80 @@
- import os
- import uuid
  import gradio as gr
- import numpy as np
- from PIL import Image
  from qdrant_client import QdrantClient
- from qdrant_client.http.models import Distance, VectorParams, PointStruct
+ from qdrant_client.http import models
  from sentence_transformers import SentenceTransformer
+ from PIL import Image
+ import uuid
+ import os

- # =========================
- # CONFIG
- # =========================
- UPLOAD_DIR = "uploads"
+ # ------------------------------
+ # Setup
+ # ------------------------------
+ COLLECTION = "lost_and_found"
+ UPLOAD_DIR = "uploaded_images"
  os.makedirs(UPLOAD_DIR, exist_ok=True)

- COLLECTION = "lostfound"
-
- # Connect to Qdrant (local or remote if deployed)
- qclient = QdrantClient(":memory:")  # use ":memory:" for demo, change for persistent DB
+ # Connect to Qdrant (local or remote)
+ qclient = QdrantClient(path="qdrant_db")

- # Create collection only if missing
- if not qclient.collection_exists(COLLECTION):
-     qclient.create_collection(
-         collection_name=COLLECTION,
-         vectors_config=VectorParams(size=512, distance=Distance.COSINE)
-     )
-
- # Load CLIP model
+ # SentenceTransformer model
  model = SentenceTransformer("clip-ViT-B-32")

+ # Recreate collection
+ if qclient.collection_exists(COLLECTION):
+     qclient.delete_collection(COLLECTION)
+
+ qclient.create_collection(
+     COLLECTION,
+     vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
+ )

- # =========================
- # ENCODING FUNCTION
- # =========================
+ # ------------------------------
+ # Helper functions
+ # ------------------------------
  def encode_data(text=None, image=None):
-     if image is not None:
-         if isinstance(image, str):  # path
-             img = Image.open(image).convert("RGB")
-         else:  # PIL.Image
-             img = image.convert("RGB")
-         emb = model.encode(img, convert_to_numpy=True, normalize_embeddings=True)
+     """Encode text or image into vector"""
+     if text and image:
+         text_vec = model.encode([text], convert_to_tensor=False)[0]
+         img_vec = model.encode([image], convert_to_tensor=False)[0]
+         return (text_vec + img_vec) / 2
      elif text:
-         emb = model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
+         return model.encode([text], convert_to_tensor=False)[0]
+     elif image:
+         return model.encode([image], convert_to_tensor=False)[0]
      else:
-         raise ValueError("Need text or image")
-     return emb.astype(np.float32)
+         raise ValueError("No input provided")

-
- # =========================
- # ADD ITEM
- # =========================
- def add_item(mode, text, image, name, phone):
+ def add_item(mode, text, image, finder_name, finder_phone):
      try:
-         vector = encode_data(text=text if text else None, image=image if image else None)
+         img_path = None
+         if image:
+             img_id = str(uuid.uuid4())
+             img_path = os.path.join(UPLOAD_DIR, f"{img_id}.png")
+             image.save(img_path)

-         payload = {
-             "mode": mode,
-             "text": text,
-             "has_image": image is not None,
-         }
-
-         # Save image if uploaded
-         if image is not None:
-             if isinstance(image, str):
-                 img = Image.open(image).convert("RGB")
-             else:
-                 img = image.convert("RGB")
-             fname = f"{uuid.uuid4().hex}.png"
-             fpath = os.path.join(UPLOAD_DIR, fname)
-             img.save(fpath)
-             payload["image_path"] = fpath
-
-         if mode == "found":
-             payload["finder_name"] = name
-             payload["finder_phone"] = phone
+         vector = encode_data(text=text if text else None, image=image if image else None)

          qclient.upsert(
              collection_name=COLLECTION,
-             points=[PointStruct(id=str(uuid.uuid4()), vector=vector.tolist(), payload=payload)]
+             points=[
+                 models.PointStruct(
+                     id=str(uuid.uuid4()),
+                     vector=vector.tolist(),
+                     payload={
+                         "mode": mode,
+                         "text": text or "",
+                         "finder_name": finder_name if mode == "found" else "",
+                         "finder_phone": finder_phone if mode == "found" else "",
+                         "image_path": img_path,
+                         "has_image": bool(image),
+                     },
+                 )
+             ],
          )
-         return "✅ Item added successfully!"
+         return f"✅ Added successfully as {mode}!"
      except Exception as e:
          return f"❌ Error: {e}"

-
- # =========================
- # SEARCH FUNCTION
- # =========================
  def search_items(text, image, max_results, min_score):
      try:
          vector = encode_data(text=text if text else None, image=image if image else None)
@@ -95,15 +84,21 @@ def search_items(text, image, max_results, min_score):
              query_vector=vector.tolist(),
              limit=max_results,
              score_threshold=min_score,
-             with_payload=True
+             with_payload=True,
          )

          texts, imgs = [], []
          for r in results:
              p = r.payload
-             desc = f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
-             if p.get("mode") == "found":
-                 desc += f" | finder:{p.get('finder_name','')} ({p.get('finder_phone','')})"
+             desc = (
+                 f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
+             )
+
+             # Always show finder info
+             finder_name = p.get("finder_name", "N/A") or "N/A"
+             finder_phone = p.get("finder_phone", "N/A") or "N/A"
+             desc += f" | finder:{finder_name} ({finder_phone})"
+
              texts.append(desc)
              if p.get("has_image") and "image_path" in p:
                  imgs.append(p["image_path"])
@@ -111,47 +106,63 @@
      except Exception as e:
          return f"❌ Error: {e}", []

+ def delete_all():
+     try:
+         qclient.delete_collection(COLLECTION)
+         qclient.create_collection(
+             COLLECTION,
+             vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
+         )
+         return "🗑️ All items deleted!"
+     except Exception as e:
+         return f"❌ Error: {e}"

- # =========================
- # CLEAR DATABASE
- # =========================
- def clear_all_items():
-     qclient.delete(collection_name=COLLECTION, points_selector={"filter": {}})
-     return "🗑️ All items deleted!"
-
-
- # =========================
- # GRADIO APP
- # =========================
+ # ------------------------------
+ # Gradio UI
+ # ------------------------------
  with gr.Blocks() as demo:
-     gr.Markdown("# 🔍 Lost & Found Search with Images + Text")
+     gr.Markdown("## 🔑 Lost & Found System")

-     with gr.Tab("➕ Add Item"):
+     with gr.Tab("Add Item"):
          mode = gr.Radio(["lost", "found"], label="Mode", value="lost")
-         text = gr.Textbox(label="Description")
-         image = gr.Image(type="pil", label="Upload Image")
-         name = gr.Textbox(label="Finder Name (only if found)")
-         phone = gr.Textbox(label="Finder Phone (only if found)")
-         add_btn = gr.Button("Add Item")
-         add_output = gr.Textbox(label="Add result")
-         add_btn.click(add_item, [mode, text, image, name, phone], add_output)
-
-     with gr.Tab("🔎 Search"):
-         s_text = gr.Textbox(label="Search by text (optional)")
-         s_image = gr.Image(type="pil", label="Search by image (optional)")
-         max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
-         min_score = gr.Slider(0.5, 1.0, value=0.9, step=0.01, label="Min similarity threshold")
+         text_in = gr.Textbox(label="Description")
+         img_in = gr.Image(type="pil", label="Upload Image")
+
+         finder_name = gr.Textbox(label="Finder Name (only if found)")
+         finder_phone = gr.Textbox(label="Finder Phone (only if found)")
+
+         add_btn = gr.Button("Add to Database")
+         add_out = gr.Textbox(label="Status")
+
+         add_btn.click(
+             add_item,
+             inputs=[mode, text_in, img_in, finder_name, finder_phone],
+             outputs=add_out,
+         )
+
+     with gr.Tab("Search"):
+         text_s = gr.Textbox(label="Search by text (optional)")
+         img_s = gr.Image(type="pil", label="Search by image (optional)")
+         max_r = gr.Slider(1, 10, value=5, step=1, label="Max results")
+         min_s = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")
+
          search_btn = gr.Button("Search")
-         search_text = gr.Textbox(label="Search results (text)")
+         search_out_text = gr.Textbox(label="Search results (text)")
          search_gallery = gr.Gallery(label="Search Results", columns=2, height="auto")
-         search_btn.click(search_items, [s_text, s_image, max_results, min_score], [search_text, search_gallery])

-     with gr.Tab("🗑️ Admin"):
-         clear_btn = gr.Button("Clear ALL items")
-         clear_out = gr.Textbox(label="Status")
-         clear_btn.click(clear_all_items, outputs=clear_out)
+         search_btn.click(
+             search_items,
+             inputs=[text_s, img_s, max_r, min_s],
+             outputs=[search_out_text, search_gallery],
+         )

+     with gr.Tab("Admin"):
+         del_btn = gr.Button("Delete All Items")
+         del_out = gr.Textbox(label="Status")
+         del_btn.click(delete_all, outputs=del_out)

+ # ------------------------------
  # Launch
+ # ------------------------------
  if __name__ == "__main__":
      demo.launch()
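
To try the revised encode-and-search pipeline without the Gradio UI, the script below exercises the same CLIP model, Qdrant client calls, and search parameters that the new app.py uses. It is a minimal sketch, not part of the commit: the collection name "smoke_test", the in-memory client, the encode() helper, the sample wallet texts, and normalize_embeddings=True (which the previous revision of encode_data used but this commit drops) are illustrative assumptions; the model name, vector size, cosine distance, and score_threshold usage mirror the committed code.

import uuid

from qdrant_client import QdrantClient
from qdrant_client.http import models
from sentence_transformers import SentenceTransformer

# Same model and 512-dim vectors as the committed app.py.
model = SentenceTransformer("clip-ViT-B-32")

# An in-memory client keeps this sketch self-contained; the commit uses
# QdrantClient(path="qdrant_db") for on-disk persistence instead.
client = QdrantClient(":memory:")

COLLECTION = "smoke_test"  # hypothetical name, not the app's collection
client.create_collection(
    COLLECTION,
    vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
)


def encode(text=None, image=None):
    """Mirror of the committed encode_data: average the text and image CLIP vectors.

    normalize_embeddings=True is an optional extra (the previous revision used it);
    with it, both modalities contribute equally to the averaged direction.
    """
    if text and image is not None:
        text_vec = model.encode([text], normalize_embeddings=True)[0]
        img_vec = model.encode([image], normalize_embeddings=True)[0]
        return (text_vec + img_vec) / 2
    if text:
        return model.encode([text], normalize_embeddings=True)[0]
    if image is not None:
        return model.encode([image], normalize_embeddings=True)[0]
    raise ValueError("No input provided")


# Index one "found" item, then query it the way search_items does.
client.upsert(
    collection_name=COLLECTION,
    points=[
        models.PointStruct(
            id=str(uuid.uuid4()),
            vector=encode(text="black leather wallet found near the library").tolist(),
            payload={"mode": "found", "text": "black leather wallet"},
        )
    ],
)

hits = client.search(
    collection_name=COLLECTION,
    query_vector=encode(text="lost my black wallet").tolist(),
    limit=5,
    score_threshold=0.5,  # the UI slider in this commit covers 0.5-1.0
    with_payload=True,
)
for hit in hits:
    print(f"{hit.score:.3f} {hit.payload}")

Because the collection uses cosine distance, the overall scale of the query vector does not affect ranking; the averaging step only controls its direction, so without per-modality normalization the modality with the larger embedding norm dominates a combined text+image query.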