hackerloi45 commited on
Commit
afa5ea3
·
1 Parent(s): 294389b
Files changed (1) hide show
  1. app.py +120 -181
app.py CHANGED
@@ -1,202 +1,141 @@
1
  import os
 
 
2
  import gradio as gr
3
  from qdrant_client import QdrantClient
4
- from qdrant_client.http import models as rest
5
- from qdrant_client.http.models import Distance, VectorParams
6
  from sentence_transformers import SentenceTransformer
7
- from transformers import CLIPProcessor, CLIPModel
8
  from PIL import Image
9
- import uuid
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- # ----------------------------
12
- # Qdrant Setup
13
- # ----------------------------
14
- QDRANT_HOST = "localhost"
15
- QDRANT_PORT = 6333
16
- COLLECTION = "lost_and_found"
17
-
18
- qclient = QdrantClient(QDRANT_HOST, port=QDRANT_PORT)
19
-
20
- # Load Models
21
- text_model = SentenceTransformer("all-MiniLM-L6-v2")
22
- clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
23
- clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
24
-
25
- # Embedding sizes
26
- TEXT_VECTOR_SIZE = text_model.get_sentence_embedding_dimension()
27
- IMAGE_VECTOR_SIZE = clip_model.config.projection_dim
28
-
29
- # Create collection if not exists
30
- try:
31
- qclient.get_collection(COLLECTION)
32
- except Exception:
33
- qclient.create_collection(
34
- COLLECTION,
35
- vectors_config={
36
- "text": VectorParams(size=TEXT_VECTOR_SIZE, distance=Distance.COSINE),
37
- "image": VectorParams(size=IMAGE_VECTOR_SIZE, distance=Distance.COSINE),
38
- },
39
  )
40
 
41
- # ----------------------------
42
- # Encoding Helpers
43
- # ----------------------------
44
- def encode_text(text: str):
45
- return text_model.encode([text])[0]
46
-
47
- def encode_image(image: Image.Image):
48
- inputs = clip_processor(images=image, return_tensors="pt")
49
- with torch.no_grad():
50
- emb = clip_model.get_image_features(**inputs)
51
- return emb[0].cpu().numpy()
52
-
53
- # ----------------------------
54
- # Add Found Item
55
- # ----------------------------
56
- def add_item(text, image, uploader_name, uploader_phone):
57
- try:
58
- if not text and image is None:
59
- return "❌ Please provide a description or an image."
60
-
61
- text_vector = encode_text(text) if text else None
62
- image_vector = encode_image(image) if image is not None else None
63
 
64
- # Save uploaded image
65
- img_path = None
66
- if image is not None:
67
- os.makedirs("uploaded_images", exist_ok=True)
68
- img_id = str(uuid.uuid4()) + ".png"
69
- img_path = os.path.join("uploaded_images", img_id)
70
- image.save(img_path)
71
-
72
- vectors = {}
73
- if text_vector is not None:
74
- vectors["text"] = text_vector.tolist()
75
- if image_vector is not None:
76
- vectors["image"] = image_vector.tolist()
77
-
78
- qclient.upsert(
79
- collection_name=COLLECTION,
80
- points=[
81
- rest.PointStruct(
82
- id=str(uuid.uuid4()),
83
- vector=vectors,
84
- payload={
85
- "text": text,
86
- "image_path": img_path,
87
- "uploader_name": uploader_name or "N/A",
88
- "uploader_phone": uploader_phone or "N/A",
89
- },
90
- )
91
- ],
92
- )
93
- return "✅ Item added successfully!"
94
- except Exception as e:
95
- return f"❌ Error adding item: {e}"
96
-
97
- # ----------------------------
98
- # Search Lost Items
99
- # ----------------------------
100
- def search_items(text, image, max_results, min_score):
101
- try:
102
- vector = None
103
- query_text = text.strip() if text else ""
104
-
105
- if isinstance(image, Image.Image):
106
- vector = encode_image(image)
107
- elif text:
108
- vector = encode_text(text)
109
-
110
- results = []
111
-
112
- # 1. Vector search
113
- if vector is not None:
114
- results = qclient.search(
115
- collection_name=COLLECTION,
116
- query_vector=vector.tolist(),
117
- limit=int(max_results),
118
- score_threshold=float(min_score),
119
- with_payload=True,
120
- )
121
-
122
- # 2. Fallback text search on payload
123
- if query_text:
124
- keyword_results = qclient.scroll(
125
- collection_name=COLLECTION,
126
- scroll_filter=rest.Filter(
127
- must=[rest.FieldCondition(
128
- key="text",
129
- match=rest.MatchText(text=query_text)
130
- )]
131
- ),
132
- limit=100,
133
- with_payload=True
134
- )[0]
135
-
136
- existing_ids = {r.id for r in results}
137
- for km in keyword_results:
138
- if km.id not in existing_ids:
139
- km.score = 1.0
140
- results.append(km)
141
-
142
- if not results:
143
- return "No matches found.", []
144
-
145
- # Format output
146
- text_out, gallery = [], []
147
- for r in results[:max_results]:
148
- payload = r.payload or {}
149
- score = getattr(r, "score", 0)
150
- uploader_name = payload.get("uploader_name", "N/A")
151
- uploader_phone = payload.get("uploader_phone", "N/A")
152
- desc = (
153
- f"id:{r.id} | score:{score:.3f} | "
154
- f"text:{payload.get('text','')} | "
155
- f"finder:{uploader_name} ({uploader_phone})"
156
- )
157
- text_out.append(desc)
158
- img_path = payload.get("image_path")
159
- if img_path and os.path.exists(img_path):
160
- gallery.append(img_path)
161
-
162
- return "\n".join(text_out), gallery
163
- except Exception as e:
164
- return f"❌ Error: {e}", []
165
-
166
- # ----------------------------
167
  # Gradio UI
168
- # ----------------------------
169
- with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
170
  with gr.Tab("➕ Add Found Item"):
171
- desc_in = gr.Textbox(label="Description", placeholder="Describe the item...")
172
- img_in = gr.Image(label="Upload Image", type="pil")
173
- uploader_name = gr.Textbox(label="Finder's Name")
174
- uploader_phone = gr.Textbox(label="Finder's Phone")
175
  add_btn = gr.Button("Add Item")
176
  add_out = gr.Textbox(label="Status")
177
- add_btn.click(
178
- add_item,
179
- inputs=[desc_in, img_in, uploader_name, uploader_phone],
180
- outputs=[add_out]
181
- )
182
 
183
  with gr.Tab("🔍 Search Lost Item"):
184
- text_in = gr.Textbox(label="Search by Text (optional)")
185
- img_in_search = gr.Image(label="Search by Image (optional)", type="pil")
186
- max_res = gr.Slider(1, 20, value=5, step=1, label="Max Results")
187
- min_score = gr.Slider(0, 1, value=0.3, step=0.01, label="Min Similarity Score")
188
  search_btn = gr.Button("Search")
189
- result_text = gr.Textbox(label="Search Results (Text)")
190
- result_gallery = gr.Gallery(label="Search Results (Images)").style(grid=3)
191
- search_btn.click(
192
- search_items,
193
- inputs=[text_in, img_in_search, max_res, min_score],
194
- outputs=[result_text, result_gallery]
195
- )
196
 
197
  with gr.Tab("⚙️ Admin"):
198
- gr.Markdown("Admin dashboard placeholder...")
199
 
 
 
 
200
  if __name__ == "__main__":
201
- import torch
202
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import os
2
+ import uuid
3
+ import tempfile
4
  import gradio as gr
5
  from qdrant_client import QdrantClient
6
+ from qdrant_client.models import VectorParams, Distance, PointStruct
 
7
  from sentence_transformers import SentenceTransformer
 
8
  from PIL import Image
9
+ import torch
10
+ import numpy as np
11
+
12
# --------------------------
# Qdrant Cloud Connection
# --------------------------
# SECURITY NOTE(review): the URL and API key were hard-coded in source (the key
# is now leaked in git history — rotate it). Read both from the environment,
# falling back to the previous values so existing deployments keep working.
QDRANT_URL = os.environ.get(
    "QDRANT_URL",
    "https://ff4da494-27b1-413c-ba58-d5ea14932fe1.europe-west3-0.gcp.cloud.qdrant.io",
)
QDRANT_API_KEY = os.environ.get(
    "QDRANT_API_KEY",
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIn0.98XRKd7ZdDXSfYDl44zbZ_VZ5csnh4tz1JACP62KZds",
)
COLLECTION_NAME = "lost_and_found"

# CLIP model (text + image embeddings): a single shared embedding space, so a
# text query can match stored image vectors and vice versa.
MODEL_NAME = "sentence-transformers/clip-ViT-B-32"
embedder = SentenceTransformer(MODEL_NAME)
VECTOR_SIZE = embedder.get_sentence_embedding_dimension()

# Qdrant Client (Cloud)
qclient = QdrantClient(
    url=QDRANT_URL,
    api_key=QDRANT_API_KEY,
)

# Create the collection only if it does not already exist.
# BUG FIX: recreate_collection() dropped and re-created the collection on every
# process start, silently wiping all previously stored lost-and-found items.
try:
    qclient.get_collection(COLLECTION_NAME)
except Exception:
    qclient.create_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )
35
+
36
+ # --------------------------
37
+ # Helper Functions
38
+ # --------------------------
39
def embed_text(text: str):
    """Encode *text* with the shared CLIP embedder; return a plain list of floats."""
    embedding = embedder.encode(text)
    return embedding.tolist()
42
+
43
def embed_image(image: Image.Image):
    """Encode a PIL *image* with the shared CLIP embedder; return a list of floats."""
    tensor = embedder.encode(image, convert_to_tensor=True)
    return tensor.detach().cpu().numpy().tolist()
47
+
48
+ # --------------------------
49
+ # Core Functions
50
+ # --------------------------
51
def add_item(description, image):
    """Add a found item (description and/or photo) to the Qdrant collection.

    The stored vector is the image embedding when a photo is supplied,
    otherwise the text embedding — both live in the same CLIP space, so
    either kind of query can match it later.

    Returns a status string for the Gradio UI.
    """
    if not description and image is None:
        return "⚠️ Please provide description or image."

    payload = {"description": description}

    # Prefer the image embedding, falling back to text. (BUG FIX: the previous
    # version always computed the text embedding first and then silently
    # overwrote it with the image embedding whenever a photo was also given.)
    if image is not None:
        vector = embed_image(image)
        # Persist the upload so search results can display the photo later.
        # NOTE(review): delete=False means these temp files are never cleaned
        # up — consider a dedicated uploads directory with retention.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
            image.save(tmp.name)
            payload["image_path"] = tmp.name
    else:
        vector = embed_text(description)

    point = PointStruct(
        id=str(uuid.uuid4()),
        vector=vector,
        payload=payload,
    )
    qclient.upsert(collection_name=COLLECTION_NAME, points=[point])

    return "✅ Item added successfully!"
77
+
78
def search_items(query_text, query_image, max_results, min_score):
    """Search lost items by text or image similarity.

    Text takes precedence over the image when both are provided. Returns a
    list of (image_path_or_None, caption) tuples for gr.Gallery, or a
    single-message list when input is missing / nothing matches.
    """
    if query_text:
        query_vector = embed_text(query_text)
    elif query_image is not None:
        query_vector = embed_image(query_image)
    else:
        return ["⚠️ Provide text or image to search."]

    results = qclient.search(
        collection_name=COLLECTION_NAME,
        query_vector=query_vector,
        # BUG FIX: gr.Slider delivers floats; Qdrant expects an int limit and
        # a float threshold — cast explicitly instead of passing through.
        limit=int(max_results),
        score_threshold=float(min_score),
    )

    if not results:
        return ["No matches found."]

    outputs = []
    for hit in results:
        desc = hit.payload.get("description", "No description")
        caption = f"{desc} (score: {round(hit.score, 3)})"
        # Gallery accepts (image, caption); image may be None for text-only items.
        outputs.append((hit.payload.get("image_path", None), caption))

    return outputs
112
+
113
+ # --------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
# --------------------------
# Gradio UI
# --------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Tab 1: register a found item (description and/or photo).
    with gr.Tab("➕ Add Found Item"):
        with gr.Row():
            item_description = gr.Textbox(label="Item Description")
            item_image = gr.Image(type="pil", label="Upload Image")

        submit_btn = gr.Button("Add Item")
        status_box = gr.Textbox(label="Status")
        submit_btn.click(
            fn=add_item,
            inputs=[item_description, item_image],
            outputs=status_box,
        )

    # Tab 2: look up a lost item by text and/or image similarity.
    with gr.Tab("🔍 Search Lost Item"):
        search_text = gr.Textbox(label="Search by Text (optional)")
        search_image = gr.Image(type="pil", label="Search by Image (optional)")
        limit_slider = gr.Slider(1, 20, step=1, value=5, label="Max Results")
        score_slider = gr.Slider(0.0, 1.0, step=0.01, value=0.3, label="Min Similarity Score")
        search_btn = gr.Button("Search")
        gallery = gr.Gallery(label="Search Results").style(grid=2, height="auto")
        search_btn.click(
            fn=search_items,
            inputs=[search_text, search_image, limit_slider, score_slider],
            outputs=gallery,
        )

    # Tab 3: placeholder for future admin tooling.
    with gr.Tab("⚙️ Admin"):
        gr.Markdown("Admin dashboard (future expansion).")

# --------------------------
# Run App
# --------------------------
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)