Spaces:
Runtime error
Runtime error
File size: 7,854 Bytes
746bf5b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 |
# app.py
import os
import uuid
import io
from PIL import Image
import gradio as gr
import numpy as np
# CLIP via Sentence-Transformers (text+image to same 512-dim space)
from sentence_transformers import SentenceTransformer
# Gemini (Google) client
from google import genai
# Qdrant client & helpers
from qdrant_client import QdrantClient
from qdrant_client.http.models import VectorParams, Distance, PointStruct
# -------------------------
# CONFIG (reads env vars)
# -------------------------
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY") # set in Hugging Face Space secrets
QDRANT_URL = os.environ.get("QDRANT_URL") # set in Hugging Face Space secrets
QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY") # set in Hugging Face Space secrets
# Local fallbacks (for local testing) - set them before running locally if needed:
# os.environ["GEMINI_API_KEY"]="..." ; os.environ["QDRANT_URL"]="..." ; os.environ["QDRANT_API_KEY"]="..."
# -------------------------
# Initialize clients/models
# -------------------------
print("Loading CLIP model (this may take 20-60s the first time)...")
MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
clip_model = SentenceTransformer(MODEL_ID) # model maps text & images to same vector space
# Gemini client (for tags/captions)
if GEMINI_API_KEY:
genai_client = genai.Client(api_key=GEMINI_API_KEY)
else:
genai_client = None
# Qdrant client
if not QDRANT_URL:
# If you prefer local Qdrant for dev: client = QdrantClient(":memory:") or local url
raise RuntimeError("Please set QDRANT_URL environment variable")
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
COLLECTION = "lost_found_items"
VECTOR_SIZE = 512
# Create collection if missing
if not qclient.collection_exists(COLLECTION):
qclient.create_collection(
collection_name=COLLECTION,
vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
)
# -------------------------
# Helpers
# -------------------------
def embed_text(text: str):
vec = clip_model.encode(text, convert_to_numpy=True)
return vec
def embed_image_pil(pil_img: Image.Image):
# sentence-transformers supports directly encoding a PIL image for CLIP models
vec = clip_model.encode(pil_img, convert_to_numpy=True)
return vec
def gen_tags_from_image_file(local_path: str) -> str:
"""Upload image file to Gemini and ask for 4 short tags.
Returns the raw text response (expected comma-separated tags)."""
if genai_client is None:
return ""
# Upload file (Gemini Developer API supports client.files.upload)
file_obj = genai_client.files.upload(file=local_path)
# Ask Gemini: produce short tags only
prompt_text = (
"Give 4 short tags (comma-separated) describing this item in the image. "
"Tags should be short single words or two-word phrases (e.g. 'black backpack', 'water bottle'). "
"Respond only with tags, no extra explanation."
)
response = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=[prompt_text, file_obj],
)
return response.text.strip()
# -------------------------
# App logic: add item
# -------------------------
def add_item(mode: str, uploaded_image, text_description: str):
"""
mode: 'lost' or 'found'
uploaded_image: PIL image or None
text_description: str
"""
item_id = str(uuid.uuid4())
payload = {"mode": mode, "text": text_description}
if uploaded_image is not None:
# Save image to temp file (so we can upload to Gemini)
tmp_path = f"/tmp/{item_id}.png"
uploaded_image.save(tmp_path)
# embed image
vec = embed_image_pil(uploaded_image).tolist()
payload["has_image"] = True
# optional: get tags from Gemini (if available)
try:
tags = gen_tags_from_image_file(tmp_path)
except Exception as e:
tags = ""
payload["tags"] = tags
# store image bytes (tiny) so we can show result in the UI (base64)
with open(tmp_path, "rb") as f:
b64 = f.read()
payload["image_b64"] = True # flag (we will return/show image via Gradio from file bytes)
else:
# only text provided
vec = embed_text(text_description).tolist()
payload["has_image"] = False
# ask Gemini to suggest tags from text
if genai_client:
try:
resp = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=f"Give 4 short, comma-separated tags for this item described as: {text_description}. Reply only with tags."
)
payload["tags"] = resp.text.strip()
except Exception:
payload["tags"] = ""
else:
payload["tags"] = ""
# Upsert into Qdrant
point = PointStruct(id=item_id, vector=vec, payload=payload)
qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
return f"Saved item id: {item_id}\nTags: {payload.get('tags','')}"
# -------------------------
# App logic: search
# -------------------------
def search_items(query_image, query_text, limit: int = 5):
# produce query embedding
if query_image is not None:
qvec = embed_image_pil(query_image).tolist()
q_type = "image"
else:
if (not query_text) or (len(query_text.strip()) == 0):
return "Please provide a query image or some query text."
qvec = embed_text(query_text).tolist()
q_type = "text"
hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
# Format output (list)
results = []
for h in hits:
payload = h.payload or {}
score = getattr(h, "score", None)
results.append(
{
"id": h.id,
"score": float(score) if score is not None else None,
"mode": payload.get("mode", ""),
"text": payload.get("text", ""),
"tags": payload.get("tags", ""),
"has_image": payload.get("has_image", False),
}
)
# Return a simple list for Gradio to show
if not results:
return "No results."
# Convert to text for display
out_lines = []
for r in results:
out_lines.append(f"id:{r['id']} score:{r['score']:.4f} mode:{r['mode']} tags:{r['tags']} text:{r['text']}")
return "\n\n".join(out_lines)
# -------------------------
# Gradio UI
# -------------------------
with gr.Blocks(title="Lost & Found — Simple Helper") as demo:
gr.Markdown("## Lost & Found Helper (image/text search) — upload items, then search by image or text.")
with gr.Row():
with gr.Column():
mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
upload_img = gr.Image(type="pil", label="Item photo (optional)")
text_desc = gr.Textbox(lines=2, placeholder="Short description (e.g. 'black backpack with blue zipper')", label="Description (optional)")
add_btn = gr.Button("Add item")
add_out = gr.Textbox(label="Add result", interactive=False)
with gr.Column():
gr.Markdown("### Search")
query_img = gr.Image(type="pil", label="Search by image (optional)")
query_text = gr.Textbox(lines=2, label="Search by text (optional)")
search_btn = gr.Button("Search")
search_out = gr.Textbox(label="Search results", interactive=False)
add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])
search_btn.click(search_items, inputs=[query_img, query_text], outputs=[search_out])
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)
|