Spaces:
Running
Running
| #!/usr/bin/env python | |
| import pathlib | |
| import cv2 | |
| import face_alignment | |
| import gradio as gr | |
| import numpy as np | |
| import torch | |
# Markdown heading rendered at the top of the demo page.
DESCRIPTION = "# [face-alignment](https://github.com/1adrianb/face-alignment)"
# NOTE(review): defined but never referenced in this file — presumably an
# intended cap on input image size; confirm before removing.
MAX_IMAGE_SIZE = 1800

# Run on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# 2D (68-point) landmark detector. `device.type` passes the bare device kind
# ("cuda"/"cpu") since FaceAlignment expects a device string, not a torch.device.
detector = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D, device=device.type)
def detect(image: np.ndarray) -> np.ndarray:
    """Annotate *image* with detected face bounding boxes and 2D landmarks.

    Args:
        image: Input image as a numpy array (as delivered by Gradio).

    Returns:
        A copy of the image with green boxes and landmark dots drawn on it,
        or the original image untouched when no face is detected.
    """
    landmarks, _, boxes = detector.get_landmarks(image, return_bboxes=True)
    if landmarks is None:
        # No face found — nothing to draw.
        return image

    annotated = image.copy()
    green = (0, 255, 0)
    for points, bbox in zip(landmarks, boxes, strict=False):
        # First four bbox entries are the box corners (any trailing score is dropped).
        x0, y0, x1, y1 = np.round(bbox[:4]).astype(int)
        cv2.rectangle(annotated, (x0, y0), (x1, y1), green, 2)

        # Scale the dot radius with the spatial extent of this face's landmarks
        # so markers stay legible on both small and large faces.
        extent = (points.max(axis=0) - points.min(axis=0)).max()
        radius = max(2, int(3 * extent / 256))
        for x, y in np.round(points).astype(int):
            cv2.circle(annotated, (x, y), radius, green, cv2.FILLED)
    return annotated
# Bundled demo images (if any) feed the Gradio examples widget; each example
# is a single-element list because the demo has exactly one input component.
image_paths = sorted(pathlib.Path("images").glob("*.jpg"))
examples = [[p.as_posix()] for p in image_paths]
# Build the Gradio UI: input image and run button on the left column,
# annotated result on the right. Statement order defines the rendered layout.
with gr.Blocks(css_paths="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            # type="numpy" hands `detect` the image as a numpy array.
            image = gr.Image(label="Input", type="numpy")
            run_button = gr.Button()
        with gr.Column():
            output = gr.Image(label="Output")
    # Example gallery; `fn`/`outputs` let Gradio populate or cache example
    # predictions (exact run-on-click behavior depends on Gradio defaults —
    # NOTE(review): confirm against the installed Gradio version).
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=output,
        fn=detect,
    )
    # Clicking the button runs detection on the current input image.
    run_button.click(
        fn=detect,
        inputs=image,
        outputs=output,
    )

# Launch the local server only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()