Spaces: Runtime error
update
- app.py +153 -133
- requirements.txt +5 -9
app.py
CHANGED
@@ -1,140 +1,160 @@
-
-import sys
-import time
-import socket
-import atexit
-import subprocess
-import shutil
-from pathlib import Path
-
 import streamlit as st
 import cv2
-
-import os
-import base64
 from huggingface_hub import hf_hub_download
 
-# --- Configuration (reuse from main.py) ---
-PORT = 8000
-BASE_URL = f"http://localhost:{PORT}/v1"
-MODEL_ALIAS = "gpt-4-vision-preview"
-REPO_ID = "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF"
-MODEL_FILE = "SmolVLM2-500M-Video-Instruct-Q8_0.gguf"
-PROJ_FILE = "mmproj-SmolVLM2-500M-Video-Instruct-Q8_0.gguf"
-
-# Download model files if missing
-def download_if_missing(repo_id: str, filename: str) -> None:
-    if not os.path.isfile(filename):
-        cached = hf_hub_download(repo_id=repo_id, filename=filename)
-        shutil.copy(cached, filename)
-
-# Ensure models on startup
-ensure_models = lambda: [download_if_missing(REPO_ID, MODEL_FILE), download_if_missing(REPO_ID, PROJ_FILE)]
 ensure_models()
 
-
-# Send image to caption API
-def caption_image_file(path: str) -> str:
-    b64 = base64.b64encode(open(path, "rb").read()).decode()
-    uri = f"data:image/jpeg;base64,{b64}"
-    payload = {
-        "model": MODEL_ALIAS,
-        "messages": [
-            {"role": "system", "content": (
-                "You are a precise image-captioning assistant. "
-                "Identify the main subject, their clothing, posture, and environment."
-            )},
-            {"role": "user", "content": [
                 {"type": "image_url", "image_url": {"url": uri}},
-                {"type": "text", "text": "
-            ]
-
-    resp =
-
-    #
-    st.
-
+# app.py
 import streamlit as st
+st.set_page_config(layout="wide")
+
+import av
 import cv2
+import time
+import tempfile
+import os
+from pathlib import Path
 from huggingface_hub import hf_hub_download
+from streamlit_webrtc import webrtc_streamer, VideoProcessorBase, RTCConfiguration
+from llama_cpp import Llama
+from llama_cpp.llama_chat_format import LlamaChatCompletionHandlerRegistry, Llava15ChatHandler
+from termcolor import cprint
+
+# ─────────────────────────────────────────
+# 1) Inline definition & registration of SmolVLM2ChatHandler
+class SmolVLM2ChatHandler(Llava15ChatHandler):
+    CHAT_FORMAT = (
+        "<|im_start|>"
+        "{% for message in messages %}"
+        "{{ message['role'] | capitalize }}"
+        "{% if message['role']=='user' and message['content'][0]['type']=='image_url' %}:"
+        "{% else %}: "
+        "{% endif %}"
+        "{% for content in message['content'] %}"
+        "{% if content['type']=='text' %}{{ content['text'] }}"
+        "{% elif content['type']=='image_url' %}"
+        "{% if content['image_url'] is string %}"
+        "{{ content['image_url'] }}\n"
+        "{% elif content['image_url'] is mapping %}"
+        "{{ content['image_url']['url'] }}\n"
+        "{% endif %}"
+        "{% endif %}"
+        "{% endfor %}"
+        "<end_of_utterance>\n"
+        "{% endfor %}"
+        "{% if add_generation_prompt %}Assistant:{% endif %}"
+    )
+
+# Overwrite any previous registration
+LlamaChatCompletionHandlerRegistry().register_chat_completion_handler(
+    "smolvlm2", SmolVLM2ChatHandler, overwrite=True
+)
+
+# ─────────────────────────────────────────
+# 2) Model & CLIP files — download if missing
+MODEL_FILE = "SmolVLM2-500M-Video-Instruct.Q8_0.gguf"
+CLIP_FILE = "mmproj-SmolVLM2-500M-Video-Instruct-Q8_0.gguf"
+MODEL_REPO = "mradermacher/SmolVLM2-500M-Video-Instruct-GGUF"
+CLIP_REPO = "ggml-org/SmolVLM2-500M-Video-Instruct-GGUF"
+
+def ensure_models():
+    if not os.path.exists(MODEL_FILE):
+        path = hf_hub_download(repo_id=MODEL_REPO, filename=MODEL_FILE)
+        os.symlink(path, MODEL_FILE)
+    if not os.path.exists(CLIP_FILE):
+        path = hf_hub_download(repo_id=CLIP_REPO, filename=CLIP_FILE)
+        os.symlink(path, CLIP_FILE)
+
 ensure_models()
 
+@st.cache_resource
+def load_llm():
+    handler = SmolVLM2ChatHandler(clip_model_path=CLIP_FILE, verbose=False)
+    return Llama(
+        model_path=MODEL_FILE,
+        chat_handler=handler,
+        n_ctx=8192,
+        verbose=False,
+    )
+
+llm = load_llm()
+
+# ─────────────────────────────────────────
+# 3) Helper to run a single frame through the model (with debug)
+def caption_frame(frame):
+    with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+        cv2.imwrite(f.name, frame)
+        uri = Path(f.name).absolute().as_uri()
+
+    messages = [
+        {
+            "role": "system",
+            "content": (
+                "Focus only on describing the key dramatic action or notable event occurring "
+                "in this image. Skip general context or scene-setting details unless they are "
+                "crucial to understanding the main action."
+            ),
+        },
+        {
+            "role": "user",
+            "content": [
                 {"type": "image_url", "image_url": {"url": uri}},
+                {"type": "text", "text": "What is happening in this image?"},
+            ],
+        },
+    ]
+
+    print("DEBUG ▶ caption_frame: invoking LLM")
+    resp = llm.create_chat_completion(
+        messages=messages,
+        max_tokens=128,
+        temperature=0.1,
+        stop=["<end_of_utterance>"],
+    )
+    out = (resp["choices"][0].get("message", {}).get("content") or "").strip()
+    print(f"DEBUG ▶ LLM returned: {out!r}")
+    return out
+
+# ─────────────────────────────────────────
+# 4) Streamlit UI + WebRTC configuration
+st.title("🎥 Real-Time Camera Captioning with SmolVLM2 (CPU)")
+
+interval_ms = st.slider(
+    "Caption every N ms", min_value=100, max_value=10000, value=1000, step=100
+)
+
+RTC_CONFIG = RTCConfiguration({
+    "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
+})
+
+class CaptionProcessor(VideoProcessorBase):
+    def __init__(self):
+        self.interval = 1.0
+        self.last_time = time.time()
+        self.caption = ""
+
+    def recv(self, frame: av.VideoFrame) -> av.VideoFrame:
+        img = frame.to_ndarray(format="bgr24")
+        now = time.time()
+        if now - self.last_time >= self.interval:
+            self.last_time = now
+            print("DEBUG ▶ CaptionProcessor.recv: time reached, generating caption")
+            self.caption = caption_frame(img)
+        return av.VideoFrame.from_ndarray(img, format="bgr24")
+
+ctx = webrtc_streamer(
+    key="smolvlm2-captioner",
+    video_processor_factory=CaptionProcessor,
+    rtc_configuration=RTC_CONFIG,
+    media_stream_constraints={"video": True, "audio": False},
+)
+
+# Update the processor interval
+if ctx.video_processor:
+    ctx.video_processor.interval = interval_ms / 1000.0
+
+# Placeholder for showing captions
+placeholder = st.empty()
+if ctx.state.playing:
+    placeholder.markdown("**Caption:** _Waiting for inference…_")
+    while ctx.state.playing:
+        txt = ctx.video_processor.caption or "_…thinking…_"
+        placeholder.markdown(f"**Caption:** {txt}")
+        time.sleep(0.1)
+else:
+    st.info("▶️ Click **Start** above to begin streaming")
+
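The deleted version sent frames over HTTP to an OpenAI-compatible llama-server; in the new version the prompt that reaches SmolVLM2 is produced entirely by the CHAT_FORMAT Jinja template registered above. Below is a minimal sketch, not part of the commit, for inspecting that rendering without downloading any model weights. It assumes only jinja2 (already a dependency of llama-cpp-python); the script name and the sample file URI are illustrative, and CHAT_FORMAT is copied verbatim from the handler:

# inspect_prompt.py: hypothetical helper, not part of this commit.
# Renders SmolVLM2ChatHandler.CHAT_FORMAT with a sample message list so the
# final prompt can be eyeballed without loading llama.cpp or the GGUF files.
from jinja2 import Template

CHAT_FORMAT = (
    "<|im_start|>"
    "{% for message in messages %}"
    "{{ message['role'] | capitalize }}"
    "{% if message['role']=='user' and message['content'][0]['type']=='image_url' %}:"
    "{% else %}: "
    "{% endif %}"
    "{% for content in message['content'] %}"
    "{% if content['type']=='text' %}{{ content['text'] }}"
    "{% elif content['type']=='image_url' %}"
    "{% if content['image_url'] is string %}"
    "{{ content['image_url'] }}\n"
    "{% elif content['image_url'] is mapping %}"
    "{{ content['image_url']['url'] }}\n"
    "{% endif %}"
    "{% endif %}"
    "{% endfor %}"
    "<end_of_utterance>\n"
    "{% endfor %}"
    "{% if add_generation_prompt %}Assistant:{% endif %}"
)

# Content is given as a list of typed parts: the template's inner loop
# iterates message['content'], so a bare string would render nothing.
messages = [
    {"role": "system",
     "content": [{"type": "text", "text": "Describe the key action."}]},
    {"role": "user",
     "content": [
         {"type": "image_url", "image_url": {"url": "file:///tmp/frame.jpg"}},
         {"type": "text", "text": "What is happening in this image?"},
     ]},
]

print(Template(CHAT_FORMAT).render(messages=messages, add_generation_prompt=True))

This prints "<|im_start|>System: Describe the key action.<end_of_utterance>", then "User:" followed by the image URI and the question, then a trailing "Assistant:". One thing the sketch surfaces: caption_frame in app.py passes the system content as a plain string, which this template's content loop skips, so whether the system instruction actually reaches the model depends on llama-cpp-python normalizing string content into text parts before rendering, which is worth verifying.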
requirements.txt
CHANGED
@@ -1,10 +1,6 @@
-
-
-
-
-
+streamlit
+streamlit-webrtc
+llama-cpp-python
+huggingface-hub
+termcolor
 opencv-python
-fastapi
-uvicorn[standard]
-llama-cpp-python[server]==0.3.9
-Pillow
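With inference moved in-process, the server stack (fastapi, uvicorn, the [server] extra and its ==0.3.9 pin, Pillow) drops out, and llama-cpp-python is now unpinned. One wrinkle: app.py also imports av, which is not listed here; it arrives transitively via streamlit-webrtc (through aiortc). Below is a small pre-flight sketch, hypothetical and not part of the commit, that checks every third-party import app.py needs before running `streamlit run app.py`:

# check_deps.py: hypothetical pre-flight script, not part of this commit.
# Imports every third-party module app.py uses at startup, including av,
# which requirements.txt does not pin directly.
import importlib

MODULES = [
    "streamlit",         # streamlit
    "streamlit_webrtc",  # streamlit-webrtc
    "llama_cpp",         # llama-cpp-python
    "huggingface_hub",   # huggingface-hub
    "termcolor",         # termcolor
    "cv2",               # opencv-python
    "av",                # transitive dependency of streamlit-webrtc
]

for name in MODULES:
    try:
        importlib.import_module(name)
        print(f"ok   {name}")
    except ImportError as exc:
        print(f"FAIL {name}: {exc}")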