import gradio as gr
import torch
from PIL import Image
import numpy as np
import sys, os

# ---- Add YOLOv12 code to Python path ----
fork_ultra_path = os.path.join(os.path.dirname(__file__), "yolov12-main", "yolov12-main")
sys.path.insert(0, fork_ultra_path)

# ---- Import YOLO ----
from ultralytics import YOLO

# ---- Load trained model ----
model_path = os.path.join(os.path.dirname(__file__), "last.pt")
model = YOLO(model_path)
# ---- Detection function ----
def detect_objects(image):
    # Ultralytics treats raw NumPy inputs as OpenCV-style BGR images, so
    # convert the PIL image to a contiguous BGR array before inference
    img = np.ascontiguousarray(np.array(image.convert("RGB"))[..., ::-1])

    # Run prediction with the same settings used during training
    results = model.predict(
        source=img,
        imgsz=640,      # same resolution as training
        conf=0.25,      # lower confidence threshold to catch subtle detections
        iou=0.45,       # standard non-max suppression IoU threshold
        verbose=False,
    )

    # plot() returns the annotated image as a BGR NumPy array;
    # flip it back to RGB before handing a PIL image to Gradio
    annotated = results[0].plot()
    return Image.fromarray(annotated[..., ::-1])
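
# ---- Optional: structured detections (illustrative sketch) ----
# Editorial addition, not part of the original app and not wired into the UI:
# a rough sketch of how raw detections could be read from an ultralytics
# Results object, assuming the YOLOv12 fork keeps the standard Boxes API
# (result.boxes, result.names, box.xyxy, box.conf, box.cls).
def extract_detections(result):
    detections = []
    names = result.names  # class-id -> class-name mapping
    for box in result.boxes:
        x1, y1, x2, y2 = box.xyxy[0].tolist()  # corner coordinates in pixels
        detections.append({
            "label": names[int(box.cls[0])],   # predicted class name
            "confidence": float(box.conf[0]),  # detection confidence
            "bbox": [x1, y1, x2, y2],
        })
    return detections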
# ---- Gradio Interface ----
demo = gr.Interface(
    fn=detect_objects,
    inputs=gr.Image(type="pil", label="Upload an Ear Image"),
    outputs=gr.Image(type="pil", label="Detection Result"),
    title="Ear Condition Detection",
    description="Upload an ear image to detect possible conditions using the trained YOLOv12 model.",
)
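
# ---- Optional: querying the running demo programmatically (illustrative sketch) ----
# Editorial addition, defined but never called: a rough sketch of how the demo
# could be queried from another process, assuming gradio_client >= 1.0 is
# installed and using the default "/predict" endpoint that gr.Interface exposes.
# "ear_sample.jpg" and the localhost URL are hypothetical, not part of this Space.
def query_demo_example(image_path="ear_sample.jpg", url="http://localhost:7860"):
    from gradio_client import Client, handle_file

    client = Client(url)
    # predict() returns a local filepath to the annotated image from detect_objects
    annotated_path = client.predict(handle_file(image_path), api_name="/predict")
    return annotated_path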
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)