-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathexample_based.py
108 lines (69 loc) · 2.48 KB
/
example_based.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import numpy as np
def example_based_accuracy(y_true_, y_pred_):
    """Example-based (Jaccard) accuracy for multi-label classification.

    Samples whose true and predicted label vectors are both all-zero are
    discarded; for the rest, per-sample accuracy is
    |true AND pred| / |true OR pred|, averaged over samples.
    Returns 0.0 when every sample is filtered out.
    """
    # Keep only samples with at least one positive label in either vector.
    kept = [(t, p) for t, p in zip(y_true_, y_pred_) if sum(t + p) > 0]
    if not kept:
        return float(0)
    y_true = [t for t, _ in kept]
    y_pred = [p for _, p in kept]
    # True positives per sample (logical AND across the label axis).
    intersection = np.sum(np.logical_and(y_true, y_pred), axis=1)
    # TP + FP + FN per sample (logical OR across the label axis).
    union = np.sum(np.logical_or(y_true, y_pred), axis=1)
    # Mean Jaccard index over the retained samples.
    return float(np.mean(intersection / union))
def example_based_recall(y_true_, y_pred_):
    """Example-based recall for multi-label classification.

    Samples with no positive true label are discarded (their recall is
    undefined); for the rest, per-sample recall is
    |true AND pred| / |true|, averaged over samples.
    Returns 0.0 when every sample is filtered out.
    """
    # Drop samples whose ground-truth vector has no positive label.
    kept = [(t, p) for t, p in zip(y_true_, y_pred_) if sum(t) > 0]
    if not kept:
        return float(0)
    y_true = [t for t, _ in kept]
    y_pred = [p for _, p in kept]
    # True positives per sample.
    true_positives = np.sum(np.logical_and(y_true, y_pred), axis=1)
    # Actual positives per sample (filter guarantees this is non-zero).
    actual_positives = np.sum(y_true, axis=1)
    # Mean per-sample recall.
    return float(np.mean(true_positives / actual_positives))
def example_based_precision(y_true_, y_pred_):
    """Example-based precision for multi-label classification.

    Samples with no positive predicted label are discarded (their
    precision is undefined); for the rest, per-sample precision is
    |true AND pred| / |pred|, averaged over samples.
    Returns 0.0 when every sample is filtered out.
    """
    # Drop samples whose prediction vector has no positive label.
    kept = [(t, p) for t, p in zip(y_true_, y_pred_) if sum(p) > 0]
    if not kept:
        return float(0)
    y_true = [t for t, _ in kept]
    y_pred = [p for _, p in kept]
    # True positives per sample.
    true_positives = np.sum(np.logical_and(y_true, y_pred), axis=1)
    # Predicted positives per sample (filter guarantees this is non-zero).
    predicted_positives = np.sum(y_pred, axis=1)
    # Mean per-sample precision.
    return float(np.mean(true_positives / predicted_positives))
def example_based_f1(y_true_, y_pred_):
    """Example-based F1 score for multi-label classification.

    Samples whose true and predicted label vectors are both all-zero are
    discarded; for the rest, per-sample F1 is
    2 * |true AND pred| / (|true| + |pred|), averaged over samples.
    Returns 0.0 when every sample is filtered out.
    """
    # Keep only samples with at least one positive label in either vector.
    kept = [(t, p) for t, p in zip(y_true_, y_pred_) if sum(t + p) > 0]
    if not kept:
        return float(0)
    y_true = [t for t, _ in kept]
    y_pred = [p for _, p in kept]
    # True positives per sample.
    true_positives = np.sum(np.logical_and(y_true, y_pred), axis=1)
    # |true| + |pred| per sample (filter guarantees this is non-zero).
    support = np.sum(y_true, axis=1) + np.sum(y_pred, axis=1)
    # Mean per-sample F1 (harmonic-mean form 2*TP / (|true| + |pred|)).
    return float(np.mean(2 * true_positives / support))