import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
import torch.nn.functional as F  # explicit import for F.pairwise_distance used below
# In the line below, remove the leading '.' when working on your local system.
# Keep the '.' before face_recognition_model when uploading to the server.
from .face_recognition_model import *  # provides Siamese and the trnscm transform used below
from PIL import Image
import base64
import io
import os
import joblib
import pickle
# Add more imports if required

###########################################################################################################################################
#        Caution: Don't change any of the filenames, function names or definitions                                                        #
#        Always use current_path + file_name when referring to any file; without it we cannot access files on the server                  #
###########################################################################################################################################

# current_path stores the absolute path of the directory containing this file.
current_path = os.path.dirname(os.path.abspath(__file__))

# -------------------------
# Face Detection
# -------------------------
def detected_face(image):
    """Detect faces in a BGR image and return the largest one as a grayscale PIL image.

    Returns None when no face is detected, so callers can fall back gracefully.
    """
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    eye_cascade = cv2.CascadeClassifier(eye_haar)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    if len(faces) == 0:
        return None
    # Keep only the face with the largest bounding-box area.
    x, y, w, h = max(faces, key=lambda f: f[2] * f[3])
    return Image.fromarray(gray[y:y+h, x:x+w])
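
# Note: eye_cascade above is constructed but never used. A minimal sketch of how
# it could filter false-positive face detections (a heuristic, not part of the
# original pipeline; the helper name is hypothetical):
def _face_has_eyes(gray_face, eye_cascade):
    """Return True if at least one eye is detected inside the grayscale face crop."""
    eyes = eye_cascade.detectMultiScale(gray_face, scaleFactor=1.1, minNeighbors=3)
    return len(eyes) > 0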

# -------------------------
# Compute Similarity
# -------------------------
def get_similarity(img1, img2):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    # Detect faces; if either image has no detectable face, fall back to
    # comparing the full grayscale images so the call still succeeds.
    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    if det_img1 is None or det_img2 is None:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
    
    # Transform images
    face1 = trnscm(det_img1).unsqueeze(0).to(device)
    face2 = trnscm(det_img2).unsqueeze(0).to(device)
    
    # -------------------------
    # Load Siamese Model
    # -------------------------
    model_path = current_path + '/siamese_model.t7'
    checkpoint = torch.load(model_path, map_location=device)
    feature_net = Siamese().to(device)
    feature_net.load_state_dict(checkpoint['net_dict'])
    feature_net.eval()
    
    # -------------------------
    # Compute similarity (Euclidean distance)
    # -------------------------
    with torch.no_grad():
        output1, output2 = feature_net(face1, face2)
        euclidean_distance = F.pairwise_distance(output1, output2)
        # Map distance d in [0, inf) to a similarity score 1 / (1 + d) in (0, 1]
        similarity_score = 1 / (1 + euclidean_distance.item())
    
    return round(similarity_score, 3)
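
# Note: get_similarity() and get_face_class() both reload the Siamese checkpoint
# from disk on every call. A minimal sketch of a module-level cache that loads it
# once (the helper name is hypothetical, not part of the required interface):
_feature_net_cache = None

def _load_feature_net(device):
    """Load the Siamese network from siamese_model.t7 once and reuse it across calls."""
    global _feature_net_cache
    if _feature_net_cache is None:
        checkpoint = torch.load(current_path + '/siamese_model.t7', map_location=device)
        net = Siamese().to(device)
        net.load_state_dict(checkpoint['net_dict'])
        net.eval()
        _feature_net_cache = net
    # .to() is a no-op when the module is already on the requested device.
    return _feature_net_cache.to(device)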

# -------------------------
# Get Face Class
# -------------------------
def get_face_class(img1):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    det_img1 = detected_face(img1)
    if det_img1 is None:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    
    # Transform image
    face = trnscm(det_img1).unsqueeze(0).to(device)
    
    # Load Siamese model
    model_path = current_path + '/siamese_model.t7'
    checkpoint = torch.load(model_path, map_location=device)
    feature_net = Siamese().to(device)
    feature_net.load_state_dict(checkpoint['net_dict'])
    feature_net.eval()
    
    # -------------------------
    # Use Siamese + classifier to predict class (if classifier exists)
    # -------------------------
    # If you have a trained classifier that takes embeddings from Siamese as input:
    # classifier = <your classifier>
    # with torch.no_grad():
    #     embedding = feature_net.forward_once(face)
    #     pred = classifier(embedding)
    #     predicted_class = classes[pred.argmax(dim=1).item()]
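    #
    # A minimal sketch of the above, assuming a scikit-learn classifier trained
    # on Siamese embeddings was saved with joblib (the 'face_classifier.pkl'
    # file name is hypothetical):
    #
    #     clf = joblib.load(current_path + '/face_classifier.pkl')
    #     with torch.no_grad():
    #         embedding = feature_net.forward_once(face).cpu().numpy()
    #     predicted_class = clf.predict(embedding)[0]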
    
    # No classifier has been trained here yet, so return a placeholder.
    predicted_class = "YET TO BE CODED"
    
    return predicted_class
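
# -------------------------
# Local usage sketch
# -------------------------
# A minimal way to exercise the pipeline on your own machine (the image file
# names below are hypothetical; the server never runs this block):
if __name__ == "__main__":
    img_a = cv2.imread(current_path + '/test1.jpg')
    img_b = cv2.imread(current_path + '/test2.jpg')
    if img_a is not None and img_b is not None:
        print("Similarity:", get_similarity(img_a, img_b))
        print("Predicted class:", get_face_class(img_a))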