ADD: demo
- app.py +105 -0
- detection.py +85 -0
- model.py +12 -0
- pretrained_models/facial/clip_weights.pth +3 -0
- pretrained_models/facial/dino_weights.pth +3 -0
- pretrained_models/facial/mobileclip_weights.pth +3 -0
- pretrained_models/general/clip_weights.pth +3 -0
- pretrained_models/general/dino_weights.pth +3 -0
- pretrained_models/general/mobileclip_weights.pth +3 -0
app.py
ADDED
@@ -0,0 +1,105 @@
import gradio as gr
import cv2
from PIL import Image
import torch
import numpy as np

from transformers import AutoImageProcessor, AutoProcessor, AutoModel, CLIPVisionModel
from detection import detect_image, detect_video
from model import LinearClassifier


def load_model(detection_type):
    # Load the CLIP vision backbone plus the linear probe for the chosen
    # domain ("facial" or "general").
    device = torch.device("cpu")

    processor = AutoProcessor.from_pretrained("openai/clip-vit-large-patch14")
    clip_model = CLIPVisionModel.from_pretrained("openai/clip-vit-large-patch14", output_attentions=True)

    model_path = f"pretrained_models/{detection_type}/clip_weights.pth"
    checkpoint = torch.load(model_path, map_location="cpu")
    input_dim = checkpoint["linear.weight"].shape[1]

    detection_model = LinearClassifier(input_dim)
    detection_model.load_state_dict(checkpoint)
    detection_model = detection_model.to(device)

    return processor, clip_model, detection_model


def process_image(image, detection_type):
    processor, clip_model, detection_model = load_model(detection_type)

    results = detect_image(image, processor, clip_model, detection_model)

    pred_score = results["pred_score"]
    attn_map = results["attn_map"]

    return pred_score, attn_map


def process_video(video, detection_type):
    processor, clip_model, detection_model = load_model(detection_type)

    # Decode the video into RGB PIL frames.
    cap = cv2.VideoCapture(video)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        pil_image = Image.fromarray(frame)
        frames.append(pil_image)
    cap.release()

    results = detect_video(frames, processor, clip_model, detection_model)

    pred_score = results["pred_score"]
    attn_map = results["attn_map"]

    return pred_score, attn_map


def change_input(input_type):
    # Toggle visibility of the image / video upload widgets.
    if input_type == "Image":
        return gr.update(visible=True), gr.update(visible=False)
    elif input_type == "Video":
        return gr.update(visible=False), gr.update(visible=True)
    else:
        return gr.update(), gr.update()


def process_input(input_type, model_type, image, video):
    detection_type = "facial" if model_type == "Facial" else "general"

    if input_type == "Image" and image is not None:
        return process_image(image, detection_type)
    elif input_type == "Video" and video is not None:
        return process_video(video, detection_type)
    else:
        return None, None


with gr.Blocks() as demo:
    gr.Markdown("## Deepfake Detection: Facial / General")

    input_type = gr.Radio(["Image", "Video"], label="Choose Input Type", value="Image")
    model_type = gr.Radio(["Facial", "General"], label="Choose Model Type", value="General")

    image_input = gr.Image(type="pil", label="Upload Image", visible=True)
    video_input = gr.Video(label="Upload Video", visible=False)

    process_button = gr.Button("Run Model")

    pred_score_output = gr.Textbox(label="Prediction Score")
    attn_map_output = gr.Image(type="pil", label="Attention Map")

    input_type.change(fn=change_input, inputs=[input_type], outputs=[image_input, video_input])

    process_button.click(
        fn=process_input,
        inputs=[input_type, model_type, image_input, video_input],
        outputs=[pred_score_output, attn_map_output],
    )

if __name__ == "__main__":
    demo.launch()
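A quick way to sanity-check the app outside the Gradio UI is to call process_input directly. The snippet below is a minimal local smoke test, not part of the commit; example.jpg is a placeholder path, and it assumes the LFS weight files under pretrained_models/ have been pulled.

# Hypothetical smoke test for app.py (not part of the commit).
from PIL import Image

from app import process_input

image = Image.open("example.jpg").convert("RGB")  # placeholder input path
score, attn_map = process_input("Image", "General", image, None)
print(f"prediction score: {score:.3f}")
attn_map.save("attention_overlay.png")  # blended attention heatmap from detect_image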
detection.py
ADDED
@@ -0,0 +1,85 @@
import torch
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PIL import Image


def vis_attn(image, patch_attention_map, alpha=0.5, vis_option="none"):
    # Upsample a patch-level attention map to image resolution and overlay it
    # on the input as a JET heatmap.
    image = np.array(image)
    H, W, _ = image.shape

    seq_len = patch_attention_map.shape[0]
    grid_size = int(seq_len ** 0.5)

    patch_attention_map = patch_attention_map.reshape(grid_size, grid_size)
    patch_attention_map = cv2.resize(patch_attention_map.cpu().detach().numpy(), (W, H), interpolation=cv2.INTER_CUBIC)

    patch_attention_map = (patch_attention_map - patch_attention_map.min()) / (patch_attention_map.max() - patch_attention_map.min())
    patch_attention_map = np.uint8(255 * patch_attention_map)

    # applyColorMap returns BGR; convert it to RGB so it blends correctly with
    # the RGB input and displays correctly through PIL.
    heatmap = cv2.applyColorMap(patch_attention_map, cv2.COLORMAP_JET)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)

    blended_image = cv2.addWeighted(image, 1 - alpha, heatmap, alpha, 0)
    blended_image = Image.fromarray(blended_image)

    return blended_image


def detect_image(image, processor, clip_model, detection_model):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = clip_model(**inputs)

    # Score the CLS-token embedding with the linear probe.
    last_hidden_states = outputs.last_hidden_state[:, 0, :]
    pred_score = float(detection_model(last_hidden_states)[0][0].cpu().detach().numpy())
    assert 0 <= pred_score <= 1

    # Sum the attention maps of the first six transformer layers.
    summed_attn = outputs.attentions[0]
    for layer_idx in range(1, 6):
        summed_attn = summed_attn + outputs.attentions[layer_idx]

    # Average over heads and keep the CLS token's attention to the image patches.
    head_mean_attn = summed_attn.mean(dim=1)[0]
    cls_attention_map = head_mean_attn[0, 1:]

    blended_image = vis_attn(image, cls_attention_map)

    results = {
        "pred_score": pred_score,
        "attn_map": blended_image,
    }

    return results


def detect_video(frames, processor, clip_model, detection_model):
    # Only the first frame is embedded, scored and visualised.
    image = frames[0]

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = clip_model(**inputs)

    last_hidden_states = outputs.last_hidden_state[:, 0, :]
    pred_score = float(detection_model(last_hidden_states)[0][0].cpu().detach().numpy())
    assert 0 <= pred_score <= 1

    # Last-layer attention from the CLS token to the image patches, averaged
    # over heads; the CLS column is dropped so the map reshapes onto the patch grid.
    last_layer_attn = outputs.attentions[-1]
    cls_attention_map = last_layer_attn[0, :, 0, 1:].mean(dim=0)

    blended_image = vis_attn(image, cls_attention_map)

    results = {
        "pred_score": pred_score,
        "attn_map": blended_image,
    }

    return results
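detect_video scores only frames[0]; every other decoded frame is ignored. If per-video robustness matters, one straightforward extension is to embed each frame and average the classifier scores. The helper below is a sketch of that idea, not part of the commit; it reuses the processor, clip_model and detection_model returned by app.load_model and leaves attention visualisation to the existing functions.

# Sketch (not in the commit): average the fake-probability over all frames
# instead of scoring only the first one.
import torch


def score_all_frames(frames, processor, clip_model, detection_model):
    scores = []
    for frame in frames:
        inputs = processor(images=frame, return_tensors="pt")
        with torch.no_grad():
            outputs = clip_model(**inputs)
        cls_embedding = outputs.last_hidden_state[:, 0, :]  # CLS-token embedding
        scores.append(float(detection_model(cls_embedding)[0][0]))
    return sum(scores) / len(scores)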
model.py
ADDED
@@ -0,0 +1,12 @@
import torch


# Single linear layer with a sigmoid head: maps a backbone embedding to a
# scalar score in [0, 1].
class LinearClassifier(torch.nn.Module):
    def __init__(self, input_dim):
        super(LinearClassifier, self).__init__()
        self.linear = torch.nn.Linear(input_dim, 1)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.linear(x)
        x = self.sigmoid(x)
        return x
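The commit ships only the probe weights (the LFS-tracked .pth files below), not the training code. Judging from the checkpoint keys read in app.py ("linear.weight"), each file holds a single linear layer trained on frozen backbone embeddings; CLIP ViT-L/14 CLS features are 1024-dimensional. The following is a hypothetical training sketch under those assumptions, with dummy tensors standing in for real/fake data.

# Hypothetical training sketch; the feature pipeline and label convention are assumptions.
import torch

from model import LinearClassifier

features = torch.randn(256, 1024)                # stand-in for frozen CLIP CLS embeddings
labels = torch.randint(0, 2, (256, 1)).float()   # stand-in real/fake labels

probe = LinearClassifier(input_dim=features.shape[1])
optimizer = torch.optim.Adam(probe.parameters(), lr=1e-3)
criterion = torch.nn.BCELoss()                   # forward() already applies the sigmoid

for epoch in range(20):
    optimizer.zero_grad()
    loss = criterion(probe(features), labels)
    loss.backward()
    optimizer.step()

torch.save(probe.state_dict(), "clip_weights.pth")  # same checkpoint format app.py loads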
pretrained_models/facial/clip_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59d99962ba4c7697416755ec815dc355d9694f957e4a76806e891a669bb33c5b
size 5686
pretrained_models/facial/dino_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bdec7ff5d97ba4101085352756a546c15829d9e5910934e66fd7cc13eb5458f
size 5686
pretrained_models/facial/mobileclip_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e94c2912a932b906dc0b2cd062b1c591948656627e48ce187ab6a3dd508a8a4
size 3674
pretrained_models/general/clip_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:189df51aab3791bea65305e3c2807341fea7bec4cb2019cc8e58d6b217c59deb
size 5686
pretrained_models/general/dino_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:43059530466fce466a0e242a1523936a9bb2f60782915ad42837f974dbbcaad1
size 5674
pretrained_models/general/mobileclip_weights.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c09f581e3fb36c74444b74f172f8ae5a601e53071b61f855f626ac37fd687875
size 3674