hackerloi45 committed on
Commit
dd67299
Β·
1 Parent(s): 03852d5
Files changed (1) hide show
  1. app.py +80 -78
app.py CHANGED
@@ -1,50 +1,50 @@
 
 
1
  import gradio as gr
2
- from qdrant_client import QdrantClient
3
- from qdrant_client.http import models
4
- from sentence_transformers import SentenceTransformer
5
  from PIL import Image
6
- import uuid
7
- import os
8
 
9
- # ------------------------------
10
  # Setup
11
- # ------------------------------
12
- COLLECTION = "lost_and_found"
13
  UPLOAD_DIR = "uploaded_images"
14
  os.makedirs(UPLOAD_DIR, exist_ok=True)
15
 
16
- # Connect to Qdrant (local or remote)
17
- qclient = QdrantClient(path="qdrant_db")
18
 
19
- # SentenceTransformer model
20
- model = SentenceTransformer("clip-ViT-B-32")
 
 
 
 
21
 
22
- # Recreate collection
23
- if qclient.collection_exists(COLLECTION):
24
- qclient.delete_collection(COLLECTION)
25
 
26
- qclient.create_collection(
27
- COLLECTION,
28
- vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
29
- )
30
 
31
- # ------------------------------
32
- # Helper functions
33
- # ------------------------------
34
  def encode_data(text=None, image=None):
35
- """Encode text or image into vector"""
36
- if text and image:
37
- text_vec = model.encode([text], convert_to_tensor=False)[0]
38
- img_vec = model.encode([image], convert_to_tensor=False)[0]
39
- return (text_vec + img_vec) / 2
40
- elif text:
41
- return model.encode([text], convert_to_tensor=False)[0]
42
- elif image:
43
- return model.encode([image], convert_to_tensor=False)[0]
44
- else:
45
- raise ValueError("No input provided")
46
-
47
- def add_item(mode, text, image, finder_name, finder_phone):
 
 
48
  try:
49
  img_path = None
50
  if image:
@@ -63,8 +63,8 @@ def add_item(mode, text, image, finder_name, finder_phone):
63
  payload={
64
  "mode": mode,
65
  "text": text or "",
66
- "finder_name": finder_name if mode == "found" else "",
67
- "finder_phone": finder_phone if mode == "found" else "",
68
  "image_path": img_path,
69
  "has_image": bool(image),
70
  },
@@ -75,6 +75,10 @@ def add_item(mode, text, image, finder_name, finder_phone):
75
  except Exception as e:
76
  return f"❌ Error: {e}"
77
 
 
 
 
 
78
  def search_items(text, image, max_results, min_score):
79
  try:
80
  vector = encode_data(text=text if text else None, image=image if image else None)
@@ -94,10 +98,10 @@ def search_items(text, image, max_results, min_score):
94
  f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
95
  )
96
 
97
- # Always show finder info
98
- finder_name = p.get("finder_name", "N/A") or "N/A"
99
- finder_phone = p.get("finder_phone", "N/A") or "N/A"
100
- desc += f" | finder:{finder_name} ({finder_phone})"
101
 
102
  texts.append(desc)
103
  if p.get("has_image") and "image_path" in p:
@@ -106,63 +110,61 @@ def search_items(text, image, max_results, min_score):
106
  except Exception as e:
107
  return f"❌ Error: {e}", []
108
 
109
- def delete_all():
110
- try:
111
- qclient.delete_collection(COLLECTION)
112
- qclient.create_collection(
113
- COLLECTION,
114
- vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
115
- )
116
- return "πŸ—‘οΈ All items deleted!"
117
- except Exception as e:
118
- return f"❌ Error: {e}"
119
 
120
- # ------------------------------
 
 
 
 
 
 
 
 
 
 
121
  # Gradio UI
122
- # ------------------------------
123
  with gr.Blocks() as demo:
124
- gr.Markdown("## πŸ”‘ Lost & Found System")
125
 
126
- with gr.Tab("Add Item"):
127
- mode = gr.Radio(["lost", "found"], label="Mode", value="lost")
128
- text_in = gr.Textbox(label="Description")
129
  img_in = gr.Image(type="pil", label="Upload Image")
130
-
131
- finder_name = gr.Textbox(label="Finder Name (only if found)")
132
- finder_phone = gr.Textbox(label="Finder Phone (only if found)")
133
-
134
  add_btn = gr.Button("Add to Database")
135
  add_out = gr.Textbox(label="Status")
136
 
137
  add_btn.click(
138
  add_item,
139
- inputs=[mode, text_in, img_in, finder_name, finder_phone],
140
  outputs=add_out,
141
  )
142
 
143
- with gr.Tab("Search"):
144
- text_s = gr.Textbox(label="Search by text (optional)")
145
- img_s = gr.Image(type="pil", label="Search by image (optional)")
146
- max_r = gr.Slider(1, 10, value=5, step=1, label="Max results")
147
- min_s = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")
148
-
149
  search_btn = gr.Button("Search")
150
- search_out_text = gr.Textbox(label="Search results (text)")
151
- search_gallery = gr.Gallery(label="Search Results", columns=2, height="auto")
152
 
153
  search_btn.click(
154
  search_items,
155
- inputs=[text_s, img_s, max_r, min_s],
156
- outputs=[search_out_text, search_gallery],
157
  )
158
 
159
- with gr.Tab("Admin"):
160
- del_btn = gr.Button("Delete All Items")
161
- del_out = gr.Textbox(label="Status")
162
- del_btn.click(delete_all, outputs=del_out)
 
163
 
164
- # ------------------------------
165
  # Launch
166
- # ------------------------------
167
  if __name__ == "__main__":
168
  demo.launch()
 
1
import os
import uuid

import gradio as gr
from PIL import Image
from qdrant_client import QdrantClient, models
from transformers import CLIPProcessor, CLIPModel

# ==============================
# Setup
# ==============================
# Directory where uploaded item photos are persisted.
UPLOAD_DIR = "uploaded_images"
os.makedirs(UPLOAD_DIR, exist_ok=True)

# In-memory Qdrant instance — data is lost on restart.
qclient = QdrantClient(":memory:")
COLLECTION = "lost_and_found"

# Create collection only if absent (avoids the deprecated recreate path).
if not qclient.collection_exists(COLLECTION):
    qclient.create_collection(
        COLLECTION,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )

# Load CLIP once at startup; both text and image encoders share this checkpoint.
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_proc = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
27
 
 
 
 
 
28
 
29
# ==============================
# Encode Function
# ==============================
def encode_data(text=None, image=None):
    """Encode text or an image into a 512-d CLIP embedding.

    Text takes precedence when both are supplied (matches existing behavior).

    Args:
        text: optional query/description string.
        image: optional PIL image.

    Returns:
        1-D numpy array (512 floats).

    Raises:
        ValueError: if neither text nor image is provided.
    """
    import torch  # local import: transformers already depends on torch

    # Pure inference — disabling autograd avoids building a computation
    # graph and cuts memory use per request.
    with torch.no_grad():
        if text:
            inputs = clip_proc(text=[text], return_tensors="pt", padding=True)
            return clip_model.get_text_features(**inputs).numpy()[0]

        if image:
            inputs = clip_proc(images=image, return_tensors="pt")
            return clip_model.get_image_features(**inputs).numpy()[0]

    raise ValueError("Need either text or image for encoding")
42
+
43
+
44
+ # ==============================
45
+ # Add Item
46
+ # ==============================
47
+ def add_item(mode, text, image, uploader_name, uploader_phone):
48
  try:
49
  img_path = None
50
  if image:
 
63
  payload={
64
  "mode": mode,
65
  "text": text or "",
66
+ "uploader_name": uploader_name or "N/A",
67
+ "uploader_phone": uploader_phone or "N/A",
68
  "image_path": img_path,
69
  "has_image": bool(image),
70
  },
 
75
  except Exception as e:
76
  return f"❌ Error: {e}"
77
 
78
+
79
+ # ==============================
80
+ # Search Items
81
+ # ==============================
82
  def search_items(text, image, max_results, min_score):
83
  try:
84
  vector = encode_data(text=text if text else None, image=image if image else None)
 
98
  f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
99
  )
100
 
101
+ # Always show uploader details
102
+ uploader_name = p.get("uploader_name", "N/A") or "N/A"
103
+ uploader_phone = p.get("uploader_phone", "N/A") or "N/A"
104
+ desc += f" | uploader:{uploader_name} ({uploader_phone})"
105
 
106
  texts.append(desc)
107
  if p.get("has_image") and "image_path" in p:
 
110
  except Exception as e:
111
  return f"❌ Error: {e}", []
112
 
 
 
 
 
 
 
 
 
 
 
113
 
114
# ==============================
# Delete All
# ==============================
def clear_all():
    """Drop and re-create the collection, removing every stored item.

    Returns a human-readable status string for the Gradio Textbox.
    """
    try:
        # recreate_collection() is deprecated in recent qdrant-client;
        # explicit delete + create matches the non-deprecated setup path.
        if qclient.collection_exists(COLLECTION):
            qclient.delete_collection(COLLECTION)
        qclient.create_collection(
            COLLECTION,
            vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
        )
        return "πŸ—‘οΈ All items cleared."
    except Exception as e:
        # Same error-reporting style as the other handlers.
        return f"❌ Error: {e}"
122
+
123
+
124
# ==============================
# Gradio UI
# ==============================
with gr.Blocks() as demo:
    gr.Markdown("# πŸ”‘ Lost & Found Image/Text Search")

    # --- Tab 1: register a lost/found item ---
    with gr.Tab("βž• Add Item"):
        mode = gr.Radio(["lost", "found"], label="Mode", value="found")
        text_in = gr.Textbox(label="Description (optional)")
        img_in = gr.Image(type="pil", label="Upload Image")
        uploader_name = gr.Textbox(label="Your Name")
        uploader_phone = gr.Textbox(label="Your Phone")
        add_btn = gr.Button("Add to Database")
        add_out = gr.Textbox(label="Status")

        add_btn.click(
            add_item,
            inputs=[mode, text_in, img_in, uploader_name, uploader_phone],
            outputs=add_out,
        )

    # --- Tab 2: similarity search by text and/or image ---
    with gr.Tab("πŸ” Search"):
        search_text = gr.Textbox(label="Search by text (optional)")
        search_img = gr.Image(type="pil", label="Search by image (optional)")
        max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
        min_score = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")

        search_btn = gr.Button("Search")
        search_out = gr.Textbox(label="Search results (text)")
        search_gallery = gr.Gallery(label="Search Results")

        search_btn.click(
            search_items,
            inputs=[search_text, search_img, max_results, min_score],
            outputs=[search_out, search_gallery],
        )

    # --- Tab 3: admin wipe ---
    with gr.Tab("πŸ—‘οΈ Admin"):
        clear_btn = gr.Button("Clear All Items")
        clear_out = gr.Textbox(label="Status")
        clear_btn.click(clear_all, outputs=clear_out)


# ==============================
# Launch
# ==============================
if __name__ == "__main__":
    demo.launch()