# Common helper function to report the accuracy metrics of different models
def evaluate_performance(actual, pred):
    """Return a one-row DataFrame summarizing binary-classification metrics.

    Parameters
    ----------
    actual : array-like of ground-truth binary labels (0/1)
    pred : array-like of predicted binary labels (0/1)

    Returns
    -------
    pandas.DataFrame
        Single row with columns TP, TN, FP, FN, Recall, Precision,
        Specificity, F1-Score and Accuracy (accuracy as a percentage).

    Raises
    ------
    ZeroDivisionError
        If a metric's denominator is zero (e.g. no actual positives),
        matching the original implementation's behavior.
    """
    # Accuracy as a percentage, rounded to 2 decimals.
    acc_score = round(accuracy_score(actual, pred) * 100, 2)

    # 2x2 confusion matrix: rows = actual class, columns = predicted class.
    # NOTE(review): assumes exactly two classes so the [0/1] indexing holds.
    confusion = confusion_matrix(actual, pred)
    TP = confusion[1, 1]  # true positives
    TN = confusion[0, 0]  # true negatives
    FP = confusion[0, 1]  # false positives
    FN = confusion[1, 0]  # false negatives

    # Sensitivity / recall: fraction of actual positives correctly predicted.
    sensitivity_recall = round(TP / float(TP + FN), 2)

    # Specificity: fraction of actual negatives correctly predicted.
    specificity = round(TN / float(TN + FP), 2)

    # BUG FIX: precision is TP / (TP + FP). The original computed
    # TN / (TN + FP), which is the specificity formula duplicated,
    # so both Precision and the F1 score built from it were wrong.
    precision = round(TP / float(TP + FP), 2)

    # F1 = harmonic mean of precision and recall. Computed from the
    # already-rounded values, matching the original implementation.
    f1_score = 2 * ((precision * sensitivity_recall) / (precision + sensitivity_recall))
    f1_score = round(f1_score, 2)

    return pd.DataFrame([{
        "TP": TP, "TN": TN, "FP": FP, "FN": FN,
        "Recall": sensitivity_recall, "Precision": precision,
        "Specificity": specificity, "F1-Score": f1_score,
        "Accuracy": acc_score,
    }])