# NOTE(review): removed non-Python extraction artifacts that preceded this line
# (web-viewer file size, commit hash, and line-number gutter) — they would be
# syntax errors if this module were imported.
import models
import time
import torch
import math
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score, average_precision_score, roc_auc_score
def eval_model(cfg, model, val_loader, loss_ce, val_batch_size):
    """Run one validation pass and report binary-classification accuracy.

    Args:
        cfg: experiment config (unused here; kept for interface compatibility).
        model: network mapping an input batch to per-class logits; column 0 of
            the logits is treated as the positive-class score.
        val_loader: iterable of batches shaped
            ``(_, input, target, binary_label, video_id)``, where ``input`` is
            ``(B, 1, ...)`` and ``target`` is a one-hot label matrix — assumed
            from the indexing below; confirm against the dataset class.
        loss_ce: criterion applied to ``(logits, binary_label)``.
        val_batch_size: unused here; kept for interface compatibility.

    Returns:
        tuple: ``(accuracy, video_ids, pred_labels, true_labels, positive_probs)``.

    Note:
        The mean validation loss is computed but — as in the original code —
        not returned, so the caller-facing tuple stays unchanged.
    """
    model.eval()
    # Follow the model's own device instead of hard-coding .cuda(), so CPU-only
    # evaluation also works; behavior is identical when the model is on GPU.
    device = next(model.parameters()).device

    outpred_list = []
    gt_label_list = []
    video_list = []
    val_loss = 0.0
    num_batches = 0
    print("******** Start Testing. ********")
    with torch.no_grad():  # disable autograd bookkeeping during validation
        for _, inputs, target, binary_label, video_id in tqdm(
            val_loader, desc="Validation", total=len(val_loader)
        ):
            inputs = inputs[:, 0]  # drop the singleton clip/crop axis
            # torch.autograd.Variable is deprecated (PyTorch >= 0.4):
            # plain tensors are used directly under no_grad().
            var_input = inputs.float().to(device)
            var_target = target.contiguous().to(device)
            var_binary_target = binary_label.contiguous().to(device)

            logit = model(var_input)
            loss = loss_ce(logit, var_binary_target)
            val_loss += loss.item()
            num_batches += 1

            # Column 0 of the logits is the positive-class score.
            outpred_list.append(logit[:, 0].sigmoid().cpu().numpy())
            gt_label_list.append(var_target.cpu().numpy())
            video_list.append(video_id)

    val_loss = val_loss / max(num_batches, 1)  # guard against an empty loader

    outpred = np.concatenate(outpred_list, 0)
    gt_label = np.concatenate(gt_label_list, 0)
    video_ids = np.concatenate(video_list, 0)

    # Threshold the positive-class probability at 0.5 for hard predictions.
    pred_labels = [1 if p > 0.5 else 0 for p in outpred]
    true_labels = np.argmax(gt_label, axis=1)  # one-hot -> class index
    pred_accuracy = accuracy_score(true_labels, pred_labels)
    return pred_accuracy, video_ids, pred_labels, true_labels, outpred