Update app.py
app.py CHANGED
@@ -1,11 +1,9 @@
 import logging
 import logging.handlers
 import queue
-import threading
-import time
 import urllib.request
 from pathlib import Path
-from typing import List, NamedTuple, Union
+from typing import List, NamedTuple
 
 try:
     from typing import Literal
@@ -241,21 +239,14 @@ def app_object_detection():
 
     class MobileNetSSDVideoTransformer(VideoTransformerBase):
         confidence_threshold: float
-        _result: Union[List[Detection], None]
-        _result_lock: threading.Lock
+        result_queue: "queue.Queue[List[Detection]]"
 
         def __init__(self) -> None:
             self._net = cv2.dnn.readNetFromCaffe(
                 str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)
             )
             self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD
-            self._result = None
-            self._result_lock = threading.Lock()
-
-        @property
-        def result(self) -> Union[List[Detection], None]:
-            with self._result_lock:
-                return self._result
+            self.result_queue = queue.Queue()
 
         def _annotate_image(self, image, detections):
             # loop over the detections
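
This hunk is the core of the commit: the lock-guarded `_result` attribute and its `result` property are replaced by a `queue.Queue`, which is thread-safe on its own and lets the consumer block until a fresh result arrives instead of polling a shared variable. A minimal sketch of that producer/consumer pattern outside Streamlit (the `worker` function and the `Detection` fields below are stand-ins, not the demo's actual inference code):

```python
import queue
import threading
import time
from typing import List, NamedTuple

class Detection(NamedTuple):
    name: str
    prob: float

# The worker thread plays the role of `transform`: it puts each frame's
# detections on the queue. The main thread plays the role of the display
# loop: get() blocks until a result is available.
result_queue: "queue.Queue[List[Detection]]" = queue.Queue()

def worker() -> None:
    for i in range(3):
        time.sleep(0.1)  # stand-in for per-frame inference
        result_queue.put([Detection(name=f"object-{i}", prob=0.9)])

threading.Thread(target=worker, daemon=True).start()
for _ in range(3):
    print(result_queue.get())  # blocks until the worker puts a result
```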
@@ -301,8 +292,7 @@ def app_object_detection():
 
             # NOTE: This `transform` method is called in another thread,
             # so it must be thread-safe.
-            with self._result_lock:
-                self._result = result
+            self.result_queue.put(result)
 
             return annotated_image
 
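
On the producer side, `put()` on an unbounded `queue.Queue` never blocks, so the video thread is never stalled by a slow UI; the trade-off is that results can pile up if the consumer falls behind. If only the most recent detections mattered, one could drain the backlog with `get_nowait()`, roughly like this (a hypothetical helper, not part of the demo):

```python
import queue

def drain_latest(q: "queue.Queue"):
    """Discard any backlog and return only the newest queued item,
    or None if the queue is empty (hypothetical helper)."""
    item = None
    while True:
        try:
            item = q.get_nowait()
        except queue.Empty:
            return item
```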
@@ -327,11 +317,11 @@ def app_object_detection():
             # this loop displaying the result labels are running
             # in different threads asynchronously.
             # Then the rendered video frames and the labels displayed here
-            # are not synchronized.
-            while True:
-                if webrtc_ctx.video_transformer:
-                    labels_placeholder.table(webrtc_ctx.video_transformer.result)
-                time.sleep(0.1)
+            # are not strictly synchronized.
+            if webrtc_ctx.video_transformer:
+                while True:
+                    result = webrtc_ctx.video_transformer.result_queue.get()
+                    labels_placeholder.table(result)
 
         st.markdown(
             "This demo uses a model and code from "
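
The new display loop calls `result_queue.get()` with no timeout, so it blocks indefinitely if the stream stops producing frames. `Queue.get()` accepts a `timeout` parameter that turns this into a recoverable condition; a hedged variant of the consumer side (the `consume` helper is an illustration, not part of the demo):

```python
import queue

def consume(result_queue: "queue.Queue", timeout: float = 1.0):
    """get() with a timeout: returns None instead of blocking forever
    when no result arrives, so a UI loop can detect a stalled stream."""
    try:
        return result_queue.get(timeout=timeout)
    except queue.Empty:
        return None

# Usage sketch:
q: "queue.Queue[str]" = queue.Queue()
q.put("person: 0.93")
print(consume(q))                # -> person: 0.93
print(consume(q, timeout=0.1))   # -> None (queue stayed empty)
```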