Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -154,61 +154,20 @@ def format_description(description, breed):
|
|
| 154 |
return formatted_description
|
| 155 |
|
| 156 |
|
| 157 |
-
# async def predict_single_dog(image):
|
| 158 |
-
# image_tensor = preprocess_image(image)
|
| 159 |
-
# with torch.no_grad():
|
| 160 |
-
# output = model(image_tensor)
|
| 161 |
-
# logits = output[0] if isinstance(output, tuple) else output
|
| 162 |
-
# probabilities = F.softmax(logits, dim=1)
|
| 163 |
-
# topk_probs, topk_indices = torch.topk(probabilities, k=3)
|
| 164 |
-
# top1_prob = topk_probs[0][0].item()
|
| 165 |
-
# topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
|
| 166 |
-
# topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
|
| 167 |
-
# return top1_prob, topk_breeds, topk_probs_percent
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
# async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4):
|
| 171 |
-
# results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
|
| 172 |
-
# dogs = []
|
| 173 |
-
# boxes = []
|
| 174 |
-
# for box in results.boxes:
|
| 175 |
-
# if box.cls == 16: # COCO dataset class for dog is 16
|
| 176 |
-
# xyxy = box.xyxy[0].tolist()
|
| 177 |
-
# confidence = box.conf.item()
|
| 178 |
-
# boxes.append((xyxy, confidence))
|
| 179 |
-
|
| 180 |
-
# if not boxes:
|
| 181 |
-
# dogs.append((image, 1.0, [0, 0, image.width, image.height]))
|
| 182 |
-
# else:
|
| 183 |
-
# nms_boxes = non_max_suppression(boxes, iou_threshold)
|
| 184 |
-
|
| 185 |
-
# for box, confidence in nms_boxes:
|
| 186 |
-
# x1, y1, x2, y2 = box
|
| 187 |
-
# w, h = x2 - x1, y2 - y1
|
| 188 |
-
# x1 = max(0, x1 - w * 0.05)
|
| 189 |
-
# y1 = max(0, y1 - h * 0.05)
|
| 190 |
-
# x2 = min(image.width, x2 + w * 0.05)
|
| 191 |
-
# y2 = min(image.height, y2 + h * 0.05)
|
| 192 |
-
# cropped_image = image.crop((x1, y1, x2, y2))
|
| 193 |
-
# dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
|
| 194 |
-
|
| 195 |
-
# return dogs
|
| 196 |
-
|
| 197 |
-
|
| 198 |
async def predict_single_dog(image):
|
| 199 |
image_tensor = preprocess_image(image)
|
| 200 |
with torch.no_grad():
|
| 201 |
output = model(image_tensor)
|
| 202 |
logits = output[0] if isinstance(output, tuple) else output
|
| 203 |
probabilities = F.softmax(logits, dim=1)
|
| 204 |
-
topk_probs, topk_indices = torch.topk(probabilities, k=3)
|
| 205 |
top1_prob = topk_probs[0][0].item()
|
| 206 |
topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
|
| 207 |
topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
|
| 208 |
return top1_prob, topk_breeds, topk_probs_percent
|
|
|
|
| 209 |
|
| 210 |
-
|
| 211 |
-
async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.5):
|
| 212 |
results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
|
| 213 |
dogs = []
|
| 214 |
boxes = []
|
|
@@ -226,10 +185,10 @@ async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.5):
|
|
| 226 |
for box, confidence in nms_boxes:
|
| 227 |
x1, y1, x2, y2 = box
|
| 228 |
w, h = x2 - x1, y2 - y1
|
| 229 |
-
x1 = max(0, x1 - w * 0.05)
|
| 230 |
-
y1 = max(0, y1 - h * 0.05)
|
| 231 |
-
x2 = min(image.width, x2 + w * 0.05)
|
| 232 |
-
y2 = min(image.height, y2 + h * 0.05)
|
| 233 |
cropped_image = image.crop((x1, y1, x2, y2))
|
| 234 |
dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
|
| 235 |
|
|
|
|
| 154 |
return formatted_description
|
| 155 |
|
| 156 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 157 |
async def predict_single_dog(image, k=3):
    """Classify a single dog image and return the top-k breed predictions.

    Args:
        image: Input image; passed straight to ``preprocess_image``
            (presumably a PIL image — TODO confirm against caller).
        k: Number of top predictions to return. Defaults to 3, which
            preserves the original hard-coded behavior.

    Returns:
        A 3-tuple ``(top1_prob, topk_breeds, topk_probs_percent)``:
        the highest softmax probability as a float, the list of the
        ``k`` predicted breed names from ``dog_breeds``, and their
        probabilities formatted as percentage strings (e.g. "97.31%").

    NOTE(review): declared ``async`` but contains no ``await`` — the
    model call runs synchronously and will block the event loop.
    Kept ``async`` so existing ``await predict_single_dog(...)``
    callers continue to work.
    """
    image_tensor = preprocess_image(image)
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        output = model(image_tensor)
    # Some model wrappers return (logits, aux); keep only the logits.
    logits = output[0] if isinstance(output, tuple) else output
    probabilities = F.softmax(logits, dim=1)
    topk_probs, topk_indices = torch.topk(probabilities, k=k)
    top1_prob = topk_probs[0][0].item()
    topk_breeds = [dog_breeds[idx.item()] for idx in topk_indices[0]]
    topk_probs_percent = [f"{prob.item() * 100:.2f}%" for prob in topk_probs[0]]
    return top1_prob, topk_breeds, topk_probs_percent
|
| 168 |
+
|
| 169 |
|
| 170 |
+
async def detect_multiple_dogs(image, conf_threshold=0.25, iou_threshold=0.4):
|
|
|
|
| 171 |
results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
|
| 172 |
dogs = []
|
| 173 |
boxes = []
|
|
|
|
| 185 |
for box, confidence in nms_boxes:
|
| 186 |
x1, y1, x2, y2 = box
|
| 187 |
w, h = x2 - x1, y2 - y1
|
| 188 |
+
x1 = max(0, x1 - w * 0.05)
|
| 189 |
+
y1 = max(0, y1 - h * 0.05)
|
| 190 |
+
x2 = min(image.width, x2 + w * 0.05)
|
| 191 |
+
y2 = min(image.height, y2 + h * 0.05)
|
| 192 |
cropped_image = image.crop((x1, y1, x2, y2))
|
| 193 |
dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
|
| 194 |
|