import io
import base64

import requests
import gradio as gr
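
# Gradio front-end for Recognito's face analysis demo: the UI defined below sends
# uploaded images to a local REST back end (assumed here to expose /compare_face
# and /detect_face) and renders the returned JSON as HTML cards.
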
def face_crop(image, face_rect):
    """Crop a detected face out of a PIL image and scale it to a 150 px height."""
    x = face_rect.get('x')
    y = face_rect.get('y')
    width = face_rect.get('width')
    height = face_rect.get('height')

    # Clamp the rectangle to the image bounds.
    if x < 0:
        x = 0
    if y < 0:
        y = 0
    if x + width >= image.width:
        width = image.width - x
    if y + height >= image.height:
        height = image.height - y

    face_image = image.crop((x, y, x + width - 1, y + height - 1))

    # Resize to a fixed 150 px height while keeping the aspect ratio.
    face_image_ratio = face_image.width / float(face_image.height)
    resized_w = int(face_image_ratio * 150)
    resized_h = 150
    face_image = face_image.resize((resized_w, resized_h))

    return face_image
def pil_image_to_base64(image, format="PNG"):
    """
    Converts a PIL.Image object to a Base64-encoded string.

    :param image: PIL.Image object
    :param format: Format to save the image, e.g., "PNG", "JPEG"
    :return: Base64-encoded string
    """
    # Save the image to a BytesIO buffer.
    buffer = io.BytesIO()
    image.save(buffer, format=format)
    buffer.seek(0)  # Rewind the buffer.

    # Convert the buffer's contents to a Base64 string.
    base64_string = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return base64_string
def compare_face(image1, image2, verifyThreshold):
    # Serialize both input images to in-memory JPEG buffers for upload.
    try:
        img_bytes1 = io.BytesIO()
        image1.save(img_bytes1, format="JPEG")
        img_bytes1.seek(0)
    except Exception:
        return ["Failed to open image1", {"resultCode": "Failed to open image1"}]

    try:
        img_bytes2 = io.BytesIO()
        image2.save(img_bytes2, format="JPEG")
        img_bytes2.seek(0)
    except Exception:
        return ["Failed to open image2", {"resultCode": "Failed to open image2"}]

    url = "http://127.0.0.1:8000/compare_face"
    files = {'image1': img_bytes1, 'image2': img_bytes2}
    response = requests.post(url=url, files=files)

    if response.ok:
        json_result = response.json()
        if json_result.get("resultCode") != "Ok":
            return [json_result.get("resultCode"), json_result]

        html = ""
        faces1 = json_result.get("faces1", [])
        faces2 = json_result.get("faces2", [])
        results = json_result.get("results", [])
        for match in results:
            score = match.get('score')
            face1_idx = match.get('face1')
            face2_idx = match.get('face2')

            face_image1 = face_crop(image1, faces1[face1_idx])
            face_value1 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image1, format="PNG"))

            face_image2 = face_crop(image2, faces2[face2_idx])
            face_value2 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image2, format="PNG"))

            # "Not equal" icon by default; switch to the "equal" icon when the score passes the threshold.
            match_icon = '<svg fill="red" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.92V10.2H19V13.92H0ZM0 21.64V17.92H19V21.64H0Z"></path><path d="M14.08 0H18.08L5.08 32H1.08L14.08 0Z"></path></svg>'
            if score > verifyThreshold:
                match_icon = '<svg fill="green" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.9202V10.2002H19V13.9202H0ZM0 21.6402V17.9202H19V21.6402H0Z"></path></svg>'

            item_value = ('<div style="align-items: center; gap: 10px; display: flex; flex-direction: column;">'
                          '<div style="display: flex; align-items: center; gap: 20px;">'
                          '{face_value1}'
                          '{match_icon}'
                          '{face_value2}'
                          '</div>'
                          '<div style="text-align: center; margin-top: 10px;">'
                          'Score: {score}'
                          '</div>'
                          '</div>'
                          ).format(face_value1=face_value1, face_value2=face_value2, match_icon=match_icon, score=f"{score:.2f}")

            html += item_value
            html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'

        return [html, json_result]
    else:
        return [response.text, {"resultCode": response.text}]
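
# Shape of the /compare_face response assumed by the parsing above (inferred from this
# client code, not an official schema):
#   {"resultCode": "Ok",
#    "faces1": [<face rect>, ...], "faces2": [<face rect>, ...],
#    "results": [{"face1": <index into faces1>, "face2": <index into faces2>, "score": <float>}, ...]}
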
def detect_face(image):
    # Serialize the input image to an in-memory JPEG buffer for upload.
    try:
        img_bytes = io.BytesIO()
        image.save(img_bytes, format="JPEG")
        img_bytes.seek(0)
    except Exception:
        return ["Failed to open image", {"resultCode": "Failed to open image"}]

    url = "http://127.0.0.1:8000/detect_face"
    files = {'image': img_bytes}
    response = requests.post(url=url, files=files)

    if response.ok:
        json_result = response.json()
        html = ""
        resultCode = json_result.get("resultCode")
        if resultCode == "Ok":
            faces = json_result.get("result", [])
            for face in faces:
                face_rect = face.get("rect", {})
                angles = face.get("angles", {})
                age_gender = face.get("age_gender", {})
                emotion = face.get("emotion", {})
                attribute = face.get("attribute", {})

                face_image = face_crop(image, face_rect)
                face_value = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image, format="PNG"))

                item_value = ('<div style="display: flex; justify-content: center; align-items: flex-start; margin: 10px;">'
                              '<div style="display: flex; align-items: flex-start; gap: 40px; ">'
                              '{face_value}'
                              '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                              '<div>'
                              '<p><b>Age</b></p>'
                              '<p><b>Gender</b></p>'
                              '<p><b>Mask</b></p>'
                              '<p><b>Left Eye</b></p>'
                              '<p><b>Right Eye</b></p>'
                              '<p><b>Yaw</b></p>'
                              '<p><b>Roll</b></p>'
                              '<p><b>Pitch</b></p>'
                              '</div>'
                              '<div>'
                              '<p>{age}</p>'
                              '<p>{gender}</p>'
                              '<p>{masked}</p>'
                              '<p>{left_eye}</p>'
                              '<p>{right_eye}</p>'
                              '<p>{yaw}</p>'
                              '<p>{roll}</p>'
                              '<p>{pitch}</p>'
                              '</div>'
                              '</div>'
                              '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                              '<div>'
                              '<p><b>Neutral</b></p>'
                              '<p><b>Happy</b></p>'
                              '<p><b>Angry</b></p>'
                              '<p><b>Surprised</b></p>'
                              '<p><b>Disgusted</b></p>'
                              '<p><b>Sad</b></p>'
                              '<p><b>Scared</b></p>'
                              '</div>'
                              '<div>'
                              '<p>{neutral}</p>'
                              '<p>{happy}</p>'
                              '<p>{angry}</p>'
                              '<p>{surprised}</p>'
                              '<p>{disgusted}</p>'
                              '<p>{sad}</p>'
                              '<p>{scared}</p>'
                              '</div>'
                              '</div>'
                              '</div></div>').format(face_value=face_value,
                                                     age=age_gender.get('age'),
                                                     gender="Female" if age_gender.get('gender') == 0 else "Male",
                                                     neutral=f"{emotion.get('neutral'):.2f}",
                                                     happy=f"{emotion.get('happy'):.2f}",
                                                     angry=f"{emotion.get('angry'):.2f}",
                                                     surprised=f"{emotion.get('surprised'):.2f}",
                                                     disgusted=f"{emotion.get('disgusted'):.2f}",
                                                     sad=f"{emotion.get('sad'):.2f}",
                                                     scared=f"{emotion.get('scared'):.2f}",
                                                     masked="Yes" if attribute.get('masked') == 1 else "No",
                                                     left_eye="Open" if attribute.get('left_eye_opened') == 1 else "Closed",
                                                     right_eye="Open" if attribute.get('right_eye_opened') == 1 else "Closed",
                                                     yaw=f"{angles.get('yaw'):.2f}",
                                                     roll=f"{angles.get('roll'):.2f}",
                                                     pitch=f"{angles.get('pitch'):.2f}",
                                                     )

                html += item_value
                html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'
        else:
            html = "No face!"

        return [html, json_result]
    else:
        return [response.text, {"resultCode": response.text}]
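
# Shape of the /detect_face response assumed by the parsing above (inferred from this
# client code, not an official schema):
#   {"resultCode": "Ok",
#    "result": [{"rect": {...}, "angles": {...}, "age_gender": {...},
#                "emotion": {...}, "attribute": {...}}, ...]}
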
with gr.Blocks() as demo:
    gr.Markdown(
        f"""
        <a href="https://recognito.vision" style="display: flex; align-items: center;">
            <img src="https://recognito.vision/wp-content/uploads/2024/03/Recognito-modified.png" style="width: 8%; margin-right: 15px;"/>
            <div>
                <p style="font-size: 32px; font-weight: bold; margin: 0;">Recognito</p>
                <p style="font-size: 18px; margin: 0;">www.recognito.vision</p>
            </div>
        </a>
        <p style="font-size: 20px; font-weight: bold;">Product Documentation</p>
        <div style="display: flex; align-items: center;">
            <a href="https://docs.recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/05/book.png" style="width: 48px; margin-right: 5px;"/></a>
        </div>
        <p style="font-size: 20px; font-weight: bold;">Visit Recognito</p>
        <div style="display: flex; align-items: center;">
            <a href="https://recognito.vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/recognito_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://www.linkedin.com/company/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/linkedin_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://huggingface.co/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/hf_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://github.com/recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/github_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://hub.docker.com/u/recognito" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/03/docker_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
            <a href="https://www.youtube.com/@recognito-vision" style="display: flex; align-items: center;"><img src="https://recognito.vision/wp-content/uploads/2024/04/youtube_64_cl.png" style="width: 32px; margin-right: 5px;"/></a>
        </div>
        <p style="font-size: 20px; font-weight: bold;">Contact us for our on-premise ID Document Verification SDKs deployment</p>
        <div style="display: flex; align-items: center;">
            <a target="_blank" href="mailto:hello@recognito.vision"><img src="https://img.shields.io/badge/email-hassan@recognito.vision-blue.svg?logo=gmail" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://wa.me/+14158003112"><img src="https://img.shields.io/badge/whatsapp-+14158003112-blue.svg?logo=whatsapp" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://t.me/recognito_vision"><img src="https://img.shields.io/badge/telegram-@recognito__vision-blue.svg?logo=telegram" alt="www.recognito.vision"></a>
            <a target="_blank" href="https://join.slack.com/t/recognito-workspace/shared_invite/zt-2d4kscqgn-"><img src="https://img.shields.io/badge/slack-recognito__workspace-blue.svg?logo=slack" alt="www.recognito.vision"></a>
        </div>
        <br/>
        """
    )
    with gr.TabItem("Face Recognition"):
        with gr.Row():
            with gr.Column(scale=7):
                with gr.Row():
                    with gr.Column():
                        image_input1 = gr.Image(type='pil')
                        gr.Examples(['examples/1.webp', 'examples/2.webp', 'examples/3.webp', 'examples/4.webp'],
                                    inputs=image_input1)
                    with gr.Column():
                        image_input2 = gr.Image(type='pil')
                        gr.Examples(['examples/5.webp', 'examples/6.webp', 'examples/7.webp', 'examples/8.webp'],
                                    inputs=image_input2)
                verifyThreshold = gr.Slider(minimum=0, maximum=1, value=0.67, label="Verify Threshold")
                face_recog_button = gr.Button("Face Recognition")
            with gr.Column(scale=3):
                with gr.TabItem("Output"):
                    recog_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    recog_json_output = gr.JSON()

    with gr.TabItem("Face Attribute"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type='pil')
                gr.Examples(['examples/11.webp', 'examples/12.webp', 'examples/13.webp', 'examples/14.webp'],
                            inputs=image_input)
                face_attr_button = gr.Button("Face Attribute")
            with gr.Column():
                with gr.TabItem("Output"):
                    detect_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    detect_json_output = gr.JSON()

    face_recog_button.click(compare_face, inputs=[image_input1, image_input2, verifyThreshold], outputs=[recog_html_output, recog_json_output])
    face_attr_button.click(detect_face, inputs=[image_input], outputs=[detect_html_output, detect_json_output])

demo.launch(server_name="0.0.0.0", server_port=7860)
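
# Note: demo.launch() serves only this Gradio UI on port 7860; the HTTP calls above
# will fail with connection errors unless the face-analysis service is already
# listening on http://127.0.0.1:8000.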