Kousik Kumar Siddavaram committed · Commit 391a18d · 1 Parent(s): b4a4baf
Add face recognition with Siamese embeddings and classifier

Files changed:
- app/Hackathon_setup/face_recognition.py            +22 -35
- app/Hackathon_setup/face_recognition_bkp.py         +115 -0
- app/Hackathon_setup/face_recognition_model.py       +10 -12
- app/Hackathon_setup/face_recognition_model_bkp.py   +80 -0
- app/Hackathon_setup/label_encoder.joblib            +3 -0
- app/Hackathon_setup/team_classifier.joblib          +3 -0
app/Hackathon_setup/face_recognition.py CHANGED

@@ -2,25 +2,23 @@ import numpy as np
 import cv2
 from matplotlib import pyplot as plt
 import torch
-# In the below line, remove '.' while working on your local system.
-# Keep '.' before face_recognition_model while uploading to the server
-from .face_recognition_model import *
+from .face_recognition_model import Siamese, trnscm, device
 from PIL import Image
-import base64
-import io
 import os
 import joblib
-import pickle
-# Add more imports if required
-
-###########################################################################################################################################
-# Caution: Don't change any of the filenames, function names and definitions                                                              #
-# Always use the current_path + file_name for referring any files, without it we cannot access files on the server                        #
-###########################################################################################################################################
+import torch.nn.functional as F
 
 # Current_path stores absolute path of the file from where it runs.
 current_path = os.path.dirname(os.path.abspath(__file__))
 
+# -------------------------
+# Load trained classifier and label encoder
+# -------------------------
+clf_path = os.path.join(current_path, "team_classifier.joblib")
+le_path = os.path.join(current_path, "label_encoder.joblib")
+clf = joblib.load(clf_path)
+le = joblib.load(le_path)
+
 # -------------------------
 # Face Detection
 # -------------------------

@@ -46,8 +44,6 @@ def detected_face(image):
 # Compute Similarity
 # -------------------------
 def get_similarity(img1, img2):
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
     # Detect faces
     det_img1 = detected_face(img1)
     det_img2 = detected_face(img2)

@@ -59,22 +55,17 @@ def get_similarity(img1, img2):
     face1 = trnscm(det_img1).unsqueeze(0).to(device)
     face2 = trnscm(det_img2).unsqueeze(0).to(device)
 
-    # -------------------------
-    # Load Siamese Model
-    # -------------------------
+    # Load Siamese model
     model_path = current_path + '/siamese_model.t7'
     checkpoint = torch.load(model_path, map_location=device)
     feature_net = Siamese().to(device)
     feature_net.load_state_dict(checkpoint['net_dict'])
     feature_net.eval()
 
-    # -------------------------
-    # Compute similarity (Euclidean distance)
-    # -------------------------
+    # Compute similarity
     with torch.no_grad():
         output1, output2 = feature_net(face1, face2)
         euclidean_distance = F.pairwise_distance(output1, output2)
-        # Convert distance to similarity score (0–1)
         similarity_score = 1 / (1 + euclidean_distance.item())
 
     return round(similarity_score, 3)

@@ -83,8 +74,7 @@ def get_similarity(img1, img2):
 # Get Face Class
 # -------------------------
 def get_face_class(img1):
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
+    # Detect face
     det_img1 = detected_face(img1)
     if det_img1 == 0:
         det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))

@@ -99,17 +89,14 @@ def get_face_class(img1):
     feature_net.load_state_dict(checkpoint['net_dict'])
     feature_net.eval()
 
-    # -------------------------
-    # Use Siamese + classifier to predict class (if classifier exists)
-    # -------------------------
-    # If you have a trained classifier that takes embeddings from Siamese as input:
-    # classifier = <your classifier>
-    # with torch.no_grad():
-    #     embedding = feature_net.forward_once(face)
-    #     pred = classifier(embedding)
-    #     predicted_class = classes[pred.argmax(dim=1).item()]
+    # Get embedding
+    with torch.no_grad():
+        embedding = feature_net.forward_once(face)
+        embedding_np = embedding.cpu().numpy()
 
-    # Since classifier is not trained here, return placeholder
-    predicted_class = "YET TO BE CODED"
+    # Predict class using trained classifier
+    pred_idx = clf.predict(embedding_np)[0]
+    pred_proba = clf.predict_proba(embedding_np).max()
+    predicted_class = le.inverse_transform([pred_idx])[0]
 
-    return predicted_class
+    return {"name": predicted_class, "probability": float(pred_proba)}
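`get_similarity` maps the Euclidean distance d between the two embeddings to 1/(1+d): identical embeddings score 1.0 and the score decays toward 0 as the faces drift apart. `get_face_class` now feeds the 5-dimensional `forward_once` embedding to the scikit-learn classifier and label encoder loaded at import time. Those two `.joblib` artifacts are shipped via Git LFS below; the script that produced them is not part of this commit, but a minimal sketch of how they could have been trained looks like the following (assumptions: run beside the module with the leading `.` removed from the import, `train_faces` is a labelled list of PIL face crops with `train_names` the matching labels, and `LogisticRegression` stands in for whatever model was actually used):

```python
import joblib
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder

from face_recognition_model import Siamese, trnscm, device

# Load the trained Siamese weights exactly as the inference code does.
checkpoint = torch.load("siamese_model.t7", map_location=device)
feature_net = Siamese().to(device)
feature_net.load_state_dict(checkpoint["net_dict"])
feature_net.eval()

def embed(pil_faces):
    """Return an (n, 5) numpy array of embeddings for a list of PIL face crops."""
    with torch.no_grad():
        batch = torch.stack([trnscm(f) for f in pil_faces]).to(device)
        return feature_net.forward_once(batch).cpu().numpy()

X = embed(train_faces)              # train_faces: hypothetical labelled crops
le = LabelEncoder()
y = le.fit_transform(train_names)   # train_names: hypothetical label list

clf = LogisticRegression(max_iter=1000).fit(X, y)  # any predict_proba-capable model works
joblib.dump(clf, "team_classifier.joblib")
joblib.dump(le, "label_encoder.joblib")
```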
app/Hackathon_setup/face_recognition_bkp.py ADDED

@@ -0,0 +1,115 @@
+import numpy as np
+import cv2
+from matplotlib import pyplot as plt
+import torch
+# In the below line, remove '.' while working on your local system.
+# Keep '.' before face_recognition_model while uploading to the server
+from .face_recognition_model import *
+from PIL import Image
+import base64
+import io
+import os
+import joblib
+import pickle
+# Add more imports if required
+
+###########################################################################################################################################
+# Caution: Don't change any of the filenames, function names and definitions                                                              #
+# Always use the current_path + file_name for referring any files, without it we cannot access files on the server                        #
+###########################################################################################################################################
+
+# Current_path stores absolute path of the file from where it runs.
+current_path = os.path.dirname(os.path.abspath(__file__))
+
+# -------------------------
+# Face Detection
+# -------------------------
+def detected_face(image):
+    eye_haar = current_path + '/haarcascade_eye.xml'
+    face_haar = current_path + '/haarcascade_frontalface_default.xml'
+    face_cascade = cv2.CascadeClassifier(face_haar)
+    eye_cascade = cv2.CascadeClassifier(eye_haar)
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+    face_areas = []
+    images = []
+    required_image = 0
+    for i, (x, y, w, h) in enumerate(faces):
+        face_cropped = gray[y:y+h, x:x+w]
+        face_areas.append(w*h)
+        images.append(face_cropped)
+        required_image = images[np.argmax(face_areas)]
+        required_image = Image.fromarray(required_image)
+    return required_image
+
+# -------------------------
+# Compute Similarity
+# -------------------------
+def get_similarity(img1, img2):
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    # Detect faces
+    det_img1 = detected_face(img1)
+    det_img2 = detected_face(img2)
+    if det_img1 == 0 or det_img2 == 0:
+        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
+        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
+
+    # Transform images
+    face1 = trnscm(det_img1).unsqueeze(0).to(device)
+    face2 = trnscm(det_img2).unsqueeze(0).to(device)
+
+    # -------------------------
+    # Load Siamese Model
+    # -------------------------
+    model_path = current_path + '/siamese_model.t7'
+    checkpoint = torch.load(model_path, map_location=device)
+    feature_net = Siamese().to(device)
+    feature_net.load_state_dict(checkpoint['net_dict'])
+    feature_net.eval()
+
+    # -------------------------
+    # Compute similarity (Euclidean distance)
+    # -------------------------
+    with torch.no_grad():
+        output1, output2 = feature_net(face1, face2)
+        euclidean_distance = F.pairwise_distance(output1, output2)
+        # Convert distance to similarity score (0–1)
+        similarity_score = 1 / (1 + euclidean_distance.item())
+
+    return round(similarity_score, 3)
+
+# -------------------------
+# Get Face Class
+# -------------------------
+def get_face_class(img1):
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    det_img1 = detected_face(img1)
+    if det_img1 == 0:
+        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
+
+    # Transform image
+    face = trnscm(det_img1).unsqueeze(0).to(device)
+
+    # Load Siamese model
+    model_path = current_path + '/siamese_model.t7'
+    checkpoint = torch.load(model_path, map_location=device)
+    feature_net = Siamese().to(device)
+    feature_net.load_state_dict(checkpoint['net_dict'])
+    feature_net.eval()
+
+    # -------------------------
+    # Use Siamese + classifier to predict class (if classifier exists)
+    # -------------------------
+    # If you have a trained classifier that takes embeddings from Siamese as input:
+    # classifier = <your classifier>
+    # with torch.no_grad():
+    #     embedding = feature_net.forward_once(face)
+    #     pred = classifier(embedding)
+    #     predicted_class = classes[pred.argmax(dim=1).item()]
+
+    # Since classifier is not trained here, return placeholder
+    predicted_class = "YET TO BE CODED"
+
+    return predicted_class
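The backup keeps the placeholder `get_face_class`, so the live module above is the one that actually predicts. A hypothetical local smoke test of its two entry points (assumptions: the Haar cascade XMLs, `siamese_model.t7`, and both `.joblib` files sit next to the module, the `app.Hackathon_setup` package imports resolve, and the image filenames are stand-ins):

```python
import cv2
from app.Hackathon_setup.face_recognition import get_similarity, get_face_class

# cv2.imread returns BGR numpy arrays, which detected_face expects.
img1 = cv2.imread("me.jpg")        # hypothetical file
img2 = cv2.imread("teammate.jpg")  # hypothetical file

print(get_similarity(img1, img2))  # score in (0, 1]; higher means more similar
print(get_face_class(img1))        # e.g. {'name': 'person3', 'probability': 0.87}
```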
app/Hackathon_setup/face_recognition_model.py CHANGED

@@ -4,8 +4,11 @@ import torchvision
 import torch.nn as nn
 import torch.nn.functional as F
 from torchvision import transforms
-# Add more imports if required
 
+# ---------------------------
+# Device configuration
+# ---------------------------
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # ---------------------------
 # Transformation Function

@@ -16,11 +19,10 @@ trnscm = transforms.Compose([
     transforms.ToTensor()
 ])
 
-
 # ---------------------------
 # Siamese Network Definition
 # ---------------------------
-class Siamese(torch.nn.Module):
+class Siamese(nn.Module):
     def __init__(self):
         super(Siamese, self).__init__()
 

@@ -54,7 +56,7 @@ class Siamese(torch.nn.Module):
     def forward_once(self, x):
         # Forward pass for one image
         output = self.cnn1(x)
-        output = output.view(output.size()[0], -1)
+        output = output.view(output.size(0), -1)
         output = self.fc1(output)
         return output
 

@@ -64,17 +66,13 @@ class Siamese(torch.nn.Module):
         output2 = self.forward_once(x2)
         return output1, output2
 
-
 ##########################################################################################################
-## Sample classification network (Specify if you are using a pytorch classifier during the training)    ##
-## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...)           ##
+## Classifier for face recognition                                                                      ##
+## Not used for face similarity; now we use a Sklearn classifier separately                             ##
 ##########################################################################################################
-
-# Not used for face similarity, so keep it as None
-classifier = None
-
+classifier = None  # Keep as None; we use the joblib-loaded Sklearn model in face_recognition.py
 
 # ---------------------------
-# Class labels (optional)
+# Class labels (optional, for reference)
 # ---------------------------
 classes = ['person1', 'person2', 'person3', 'person4', 'person5', 'person6', 'person7']
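Each `ReflectionPad2d(1)` + 3×3 convolution pair preserves spatial size, so `cnn1` turns a 1×100×100 grayscale face into 8×100×100 = 80,000 features, exactly the input `fc1` expects, and `forward_once` returns a 5-dimensional embedding. A quick shape check of the fixed `view` call, assuming it runs beside `face_recognition_model.py`:

```python
import torch
from face_recognition_model import Siamese, device

net = Siamese().to(device).eval()
x = torch.randn(1, 1, 100, 100, device=device)  # one grayscale 100x100 face
with torch.no_grad():
    emb = net.forward_once(x)
print(emb.shape)  # torch.Size([1, 5])
```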
app/Hackathon_setup/face_recognition_model_bkp.py ADDED

@@ -0,0 +1,80 @@
+import math
+import torch
+import torchvision
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+# Add more imports if required
+
+
+# ---------------------------
+# Transformation Function
+# ---------------------------
+# Same transforms as used during training in Colab
+trnscm = transforms.Compose([
+    transforms.Resize((100, 100)),
+    transforms.ToTensor()
+])
+
+
+# ---------------------------
+# Siamese Network Definition
+# ---------------------------
+class Siamese(torch.nn.Module):
+    def __init__(self):
+        super(Siamese, self).__init__()
+
+        # CNN layers (same as your Colab model)
+        self.cnn1 = nn.Sequential(
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(1, 4, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(4),
+
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(4, 8, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(8),
+
+            nn.ReflectionPad2d(1),
+            nn.Conv2d(8, 8, kernel_size=3),
+            nn.ReLU(inplace=True),
+            nn.BatchNorm2d(8)
+        )
+
+        # Fully connected layers
+        self.fc1 = nn.Sequential(
+            nn.Linear(8 * 100 * 100, 500),
+            nn.ReLU(inplace=True),
+            nn.Linear(500, 500),
+            nn.ReLU(inplace=True),
+            nn.Linear(500, 5)
+        )
+
+    def forward_once(self, x):
+        # Forward pass for one image
+        output = self.cnn1(x)
+        output = output.view(output.size()[0], -1)
+        output = self.fc1(output)
+        return output
+
+    def forward(self, x1, x2):
+        # Forward pass for both images
+        output1 = self.forward_once(x1)
+        output2 = self.forward_once(x2)
+        return output1, output2
+
+
+##########################################################################################################
+## Sample classification network (Specify if you are using a pytorch classifier during the training)    ##
+## classifier = nn.Sequential(nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Linear...)           ##
+##########################################################################################################
+
+# Not used for face similarity, so keep it as None
+classifier = None
+
+
+# ---------------------------
+# Class labels (optional)
+# ---------------------------
+classes = ['person1', 'person2', 'person3', 'person4', 'person5', 'person6', 'person7']
app/Hackathon_setup/label_encoder.joblib ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6613233d2442a0938635221fec29c91fcf4024f791c79465777286a70349ec97
+size 351
app/Hackathon_setup/team_classifier.joblib ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9c07b411ab1fa2f2365146b188ea0d4438b5317b88c81b256766c4c5ada3e93
+size 1761