# NOTE(review): "Spaces: / Runtime error" below was a Hugging Face Spaces status
# banner captured during export, not part of the program; kept only as this
# comment so the file parses as Python.
import os
import uuid

import gradio as gr
from PIL import Image
from qdrant_client import QdrantClient, models
from transformers import CLIPProcessor, CLIPModel

# ==============================
# Setup
# ==============================

# Directory where uploaded item photos are persisted on local disk.
UPLOAD_DIR = "uploaded_images"
os.makedirs(UPLOAD_DIR, exist_ok=True)

# In-memory Qdrant instance: the whole index is lost on restart (demo-grade).
qclient = QdrantClient(":memory:")
COLLECTION = "lost_and_found"

# Create collection (with deprecation fix)
# `collection_exists` + `create_collection` replaces the deprecated
# `recreate_collection` pattern. size=512 matches the embedding width of
# CLIP ViT-B/32 used below; cosine distance suits CLIP embeddings.
if not qclient.collection_exists(COLLECTION):
    qclient.create_collection(
        COLLECTION,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )

# Load CLIP
# Model + processor are loaded once at startup and shared by both the
# text and image encoding paths in encode_data().
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
clip_proc = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
| # ============================== | |
| # Encode Function | |
| # ============================== | |
def encode_data(text=None, image=None):
    """Embed a text string or a PIL image into CLIP's shared 512-d space.

    Text takes precedence when both are supplied (the image is ignored in
    that case). Forward passes run under ``torch.no_grad()`` so no autograd
    graph is built — the original tracked gradients and only detached
    afterwards, wasting memory on every encode.

    Args:
        text: Optional description/query string; empty strings are ignored.
        image: Optional PIL image.

    Returns:
        A 1-D numpy array of length 512.

    Raises:
        ValueError: If neither a non-empty text nor an image is given.
    """
    import torch  # local import: the rest of the module never uses torch directly

    if text:
        inputs = clip_proc(text=[text], return_tensors="pt", padding=True)
        with torch.no_grad():
            return clip_model.get_text_features(**inputs).numpy()[0]
    if image is not None:  # explicit None check — safer than truthiness on image objects
        inputs = clip_proc(images=image, return_tensors="pt")
        with torch.no_grad():
            return clip_model.get_image_features(**inputs).numpy()[0]
    raise ValueError("Need either text or image for encoding")
| # ============================== | |
| # Add Item | |
| # ============================== | |
def add_item(mode, text, image, uploader_name, uploader_phone):
    """Store one lost/found item (CLIP embedding + contact payload) in Qdrant.

    Returns a human-readable status string for the Gradio UI; any failure
    is reported as a message rather than raised.
    """
    try:
        saved_path = None
        if image:
            # Persist the uploaded picture under a fresh UUID filename.
            saved_path = os.path.join(UPLOAD_DIR, f"{uuid.uuid4()}.png")
            image.save(saved_path)

        embedding = encode_data(
            text=text if text else None,
            image=image if image else None,
        )

        payload = {
            "mode": mode,
            "text": text or "",
            "uploader_name": uploader_name or "N/A",
            "uploader_phone": uploader_phone or "N/A",
            "image_path": saved_path,
            "has_image": bool(image),
        }
        point = models.PointStruct(
            id=str(uuid.uuid4()),
            vector=embedding.tolist(),
            payload=payload,
        )
        qclient.upsert(collection_name=COLLECTION, points=[point])
        return f"β Added successfully as {mode}!"
    except Exception as e:
        return f"β Error: {e}"
| # ============================== | |
| # Search Items | |
| # ============================== | |
def search_items(text, image, max_results, min_score):
    """Query the collection by text and/or image similarity.

    Args:
        text: Optional query string (ignored when empty).
        image: Optional PIL query image.
        max_results: Maximum number of hits to return.
        min_score: Cosine-similarity threshold; lower-scoring hits are dropped.

    Returns:
        Tuple of (newline-joined description text, list of image paths for
        the gallery). On failure, an error message and an empty list.
    """
    try:
        vector = encode_data(text=text if text else None, image=image if image else None)
        # NOTE(review): `search` is deprecated in newer qdrant-client releases
        # in favour of `query_points`; kept for compatibility with the client
        # version this app was written against — confirm before upgrading.
        results = qclient.search(
            collection_name=COLLECTION,
            query_vector=vector.tolist(),
            limit=max_results,
            score_threshold=min_score,
            with_payload=True,
        )
        texts, imgs = [], []
        for r in results:
            p = r.payload
            desc = (
                f"id:{r.id} | score:{r.score:.3f} | mode:{p.get('mode','')} | text:{p.get('text','')}"
            )
            # Always show uploader details
            uploader_name = p.get("uploader_name", "N/A") or "N/A"
            uploader_phone = p.get("uploader_phone", "N/A") or "N/A"
            desc += f" | uploader:{uploader_name} ({uploader_phone})"
            texts.append(desc)
            # Only hand the gallery paths that are non-None and still exist on
            # disk — a stale or missing file would make the Gradio gallery fail.
            img_path = p.get("image_path")
            if p.get("has_image") and img_path and os.path.exists(img_path):
                imgs.append(img_path)
        return "\n".join(texts) if texts else "No matches", imgs
    except Exception as e:
        return f"β Error: {e}", []
| # ============================== | |
| # Delete All | |
| # ============================== | |
def clear_all():
    """Wipe the index and saved upload files; return a status string.

    Uses explicit delete + create instead of the deprecated
    ``recreate_collection``, matching the startup code's
    ``collection_exists`` / ``create_collection`` pattern.
    """
    if qclient.collection_exists(COLLECTION):
        qclient.delete_collection(COLLECTION)
    qclient.create_collection(
        COLLECTION,
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )
    # Also remove orphaned image files so UPLOAD_DIR does not grow forever.
    for fname in os.listdir(UPLOAD_DIR):
        try:
            os.remove(os.path.join(UPLOAD_DIR, fname))
        except OSError:
            pass  # best effort: a missing/locked file must not fail the clear
    return "ποΈ All items cleared."
# ==============================
# Gradio UI
# ==============================
with gr.Blocks() as demo:
    gr.Markdown("# π Lost & Found Image/Text Search")

    # --- Tab 1: register a lost or found item ---
    with gr.Tab("β Add Item"):
        mode = gr.Radio(["lost", "found"], label="Mode", value="found")
        text_in = gr.Textbox(label="Description (optional)")
        img_in = gr.Image(type="pil", label="Upload Image")
        uploader_name = gr.Textbox(label="Your Name")
        uploader_phone = gr.Textbox(label="Your Phone")
        add_btn = gr.Button("Add to Database")
        add_out = gr.Textbox(label="Status")
        add_btn.click(
            add_item,
            inputs=[mode, text_in, img_in, uploader_name, uploader_phone],
            outputs=add_out,
        )

    # --- Tab 2: similarity search by text and/or image ---
    with gr.Tab("π Search"):
        search_text = gr.Textbox(label="Search by text (optional)")
        search_img = gr.Image(type="pil", label="Search by image (optional)")
        max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
        min_score = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")
        search_btn = gr.Button("Search")
        search_out = gr.Textbox(label="Search results (text)")
        search_gallery = gr.Gallery(label="Search Results")
        search_btn.click(
            search_items,
            inputs=[search_text, search_img, max_results, min_score],
            outputs=[search_out, search_gallery],
        )

    # --- Tab 3: admin reset ---
    with gr.Tab("ποΈ Admin"):
        clear_btn = gr.Button("Clear All Items")
        clear_out = gr.Textbox(label="Status")
        clear_btn.click(clear_all, outputs=clear_out)
# ==============================
# Launch
# ==============================
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not when the module
    # is imported by a runner that serves `demo` itself).
    demo.launch()