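# Gradio demo: ear-condition detection with a custom-trained YOLOv12 model.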
import gradio as gr
import torch
from PIL import Image
import numpy as np
import sys, os
# ---- Add YOLOv12 code to Python path ----
fork_ultra_path = os.path.join(os.path.dirname(__file__), "yolov12-main", "yolov12-main")
sys.path.insert(0, fork_ultra_path)
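# Inserting at index 0 makes the forked YOLOv12 package shadow any
# pip-installed ultralytics, so the import below resolves to the fork.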
# ---- Import YOLO ----
from ultralytics import YOLO
# ---- Load trained model ----
model_path = os.path.join(os.path.dirname(__file__), "last.pt")
model = YOLO(model_path)
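# Note: Ultralytics training saves both "last.pt" (final epoch) and
# "best.pt" (highest validation mAP); this Space ships last.pt.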
# ---- Detection function ----
def detect_objects(image):
    # Convert PIL to RGB NumPy array (YOLO expects RGB)
    img = np.array(image.convert("RGB"))
    # Run prediction with same settings used during training
    results = model.predict(
        source=img,
        imgsz=640,      # same resolution as training
        conf=0.25,      # lower confidence threshold to catch subtle detections
        iou=0.45,       # standard non-max-suppression IoU threshold
        verbose=False,
    )
    # Visualize results. plot() returns a BGR array (OpenCV convention),
    # so flip the channel order before handing it to PIL.
    annotated = results[0].plot()
    return Image.fromarray(annotated[..., ::-1])
# ---- Gradio Interface ----
demo = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="pil", label="Upload an Ear Image"),
    outputs=gr.Image(type="pil", label="Detection Result"),
    title="Ear Condition Detection",
    description="Upload an ear image to detect possible conditions using the trained YOLOv12 model.",
)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
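# Quick local smoke test (hypothetical names: assumes this file is app.py and a
# sample image exists at sample.jpg; the __main__ guard keeps import side-effect free):
#   python -c "from app import detect_objects; from PIL import Image; detect_objects(Image.open('sample.jpg')).save('out.jpg')"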