Commit · dc23fc1
Parent(s): a124643
Add files
app.py ADDED
@@ -0,0 +1,94 @@
import streamlit as st
import torch
from PIL import Image, ImageDraw
from typing import Tuple
import numpy as np
import const
import time


def draw_box(
    draw: ImageDraw.ImageDraw,
    box: Tuple[float, float, float, float],
    text: str = "",
    color: Tuple[int, int, int] = (255, 255, 0),
) -> None:
    """
    Draw a bounding box on an image.
    """
    line_width = 3
    font_height = 8
    y_min, x_min, y_max, x_max = box
    (left, right, top, bottom) = (
        x_min,
        x_max,
        y_min,
        y_max,
    )
    draw.line(
        [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
        width=line_width,
        fill=color,
    )
    if text:
        draw.text(
            (left + line_width, abs(top - line_width - font_height)), text, fill=color
        )


# Load a pretrained YOLOv5 model from Torch Hub; cached across Streamlit reruns.
@st.cache(allow_output_mutation=True, show_spinner=True)
def get_model(model_id: str = "yolov5s"):
    model = torch.hub.load("ultralytics/yolov5", model_id)
    return model


# Settings
st.sidebar.title("Settings")
model_id = st.sidebar.selectbox("Pretrained model", const.PRETRAINED_MODELS, index=1)
img_size = st.sidebar.selectbox("Image resize for inference", const.IMAGE_SIZES, index=1)
CONFIDENCE = st.sidebar.slider(
    "Confidence threshold",
    const.MIN_CONF,
    const.MAX_CONF,
    const.DEFAULT_CONF,
)

model = get_model(model_id)
st.title(f"{model_id}")

img_file_buffer = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if img_file_buffer is not None:
    pil_image = Image.open(img_file_buffer)
else:
    pil_image = Image.open(const.DEFAULT_IMAGE)

st.text(f"Input image width and height: {pil_image.width} x {pil_image.height}")
start_time = time.time()
results = model(pil_image, size=img_size)
end_time = time.time()

# Detections as a DataFrame with columns xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
df = df[df["confidence"] > CONFIDENCE]

draw = ImageDraw.Draw(pil_image)
for _, obj in df.iterrows():
    name = obj["name"]
    confidence = obj["confidence"]
    box_label = f"{name} {confidence:.2f}"

    draw_box(
        draw,
        (obj["ymin"], obj["xmin"], obj["ymax"], obj["xmax"]),
        text=box_label,
        color=const.RED,
    )

st.image(
    np.array(pil_image),
    caption="Processed image",
    use_column_width=True,
)

st.text(f"Time to inference: {round(end_time - start_time, 2)} sec")

st.table(df)
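
For reference, the Torch Hub inference path used in app.py can be exercised outside Streamlit. A minimal sketch, assuming network access so torch.hub.load can fetch the ultralytics/yolov5 repo and pretrained weights on first use:

import torch
from PIL import Image

# Downloads the YOLOv5 repo and weights on the first call, then reuses the local cache
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

# Inference on a single PIL image, resized to 640 px for the forward pass
results = model(Image.open("demo.jpg"), size=640)

# One DataFrame per input image, with columns:
# xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
print(df.head())

This is the same results.pandas().xyxy[0] DataFrame that app.py filters by the confidence slider before drawing boxes.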
const.py ADDED
@@ -0,0 +1,20 @@
# rgb(red, green, blue)
RED = (255, 0, 0)  # For objects within the ROI
GREEN = (0, 255, 0)  # For ROI box
YELLOW = (255, 255, 0)  # For objects outside the ROI

DEFAULT_IMAGE = "demo.jpg"

PRETRAINED_MODELS = [
    "yolov5n",
    "yolov5s",
    "yolov5m",
    "yolov5l",
    "yolov5x",
]

IMAGE_SIZES = [320, 640, 1280]

MIN_CONF = 0.1
MAX_CONF = 1.0
DEFAULT_CONF = 0.5
demo.jpg ADDED
requirements.txt ADDED
@@ -0,0 +1,20 @@
streamlit
pandas

# YOLOv5 requirements
matplotlib>=3.2.2
numpy>=1.18.5
opencv-python>=4.1.1
Pillow>=7.1.2
PyYAML>=5.3.1
requests>=2.23.0
scipy>=1.4.1
torch>=1.7.0
torchvision>=0.8.1
tqdm>=4.64.0
seaborn>=0.11.0

# Extras --------------------------------------
ipython  # interactive notebook
psutil  # system utilization
thop>=0.1.1  # FLOPs computation
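
To reproduce the Space outside Hugging Face, the standard Streamlit workflow should apply (assuming a local Python environment): install the pinned dependencies with pip install -r requirements.txt, then launch with streamlit run app.py. Note that the first page load triggers the Torch Hub download inside get_model, so the machine needs network access on startup.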