evaluator.py
import numpy as np
from sklearn.metrics import accuracy_score, matthews_corrcoef, mean_squared_error


def evaluate(prediction, ground_truth, hinge=False, reg=False):
    # Drop a trailing singleton dimension so predictions line up with the labels.
    if prediction.shape[-1] == 1:
        prediction = np.squeeze(prediction)
    assert ground_truth.shape == prediction.shape, 'shape mismatch'
    performance = {}
    if reg:
        # Regression: report mean squared error only.
        performance['mse'] = mean_squared_error(np.squeeze(ground_truth),
                                                np.squeeze(prediction))
        return performance
    if hinge:
        # Hinge scores are signed margins: map sign {-1, 0, 1} to {0, 0.5, 1},
        # then force ambiguous (zero-margin) or NaN entries to class 0.
        pred = (np.sign(prediction) + 1) / 2
        for ind, p in enumerate(pred):
            if abs(p[0] - 0.5) < 1e-8 or np.isnan(p[0]):
                pred[ind][0] = 0
    else:
        # Probability-style scores: threshold at 0.5 by rounding.
        pred = np.round(prediction)
    try:
        performance['acc'] = accuracy_score(ground_truth, pred)
    except Exception:
        # Dump the offending predictions for inspection, then propagate the error.
        np.savetxt('prediction', pred, delimiter=',')
        raise
    performance['mcc'] = matthews_corrcoef(ground_truth, pred)
    return performance
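
# Worked example of the hinge mapping above (values are illustrative only,
# not taken from the original file):
#   prediction = [[-2.3], [0.7], [0.0]]
#   (np.sign(prediction) + 1) / 2  ->  [[0.0], [1.0], [0.5]]
#   the cleanup loop then forces the ambiguous 0.5 (zero-margin) entry to 0,
#   giving final class labels [[0.0], [1.0], [0.0]].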


def compare(current_performance, origin_performance):
    # For each metric in the reference results, flag whether the current run improves on it.
    is_better = {}
    for metric_name in origin_performance.keys():
        if metric_name == 'mse':
            # Lower mean squared error is better.
            is_better[metric_name] = (current_performance[metric_name]
                                      < origin_performance[metric_name])
        else:
            # Higher accuracy / MCC is better.
            is_better[metric_name] = (current_performance[metric_name]
                                      > origin_performance[metric_name])
    return is_better
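

# --- Usage sketch (not part of the original file) ---
# A minimal, hand-made example of how evaluate() and compare() might be called
# for the default (non-hinge, non-regression) path. The arrays and the
# `baseline` numbers below are illustrative assumptions, not real results.
if __name__ == '__main__':
    ground_truth = np.array([0., 1., 1., 0., 1., 0., 0., 1.])
    scores = np.array([0.2, 0.8, 0.6, 0.4, 0.9, 0.1, 0.7, 0.3])

    current = evaluate(scores, ground_truth)   # yields {'acc': ..., 'mcc': ...}
    baseline = {'acc': 0.5, 'mcc': 0.0}        # hypothetical reference metrics
    print(current)
    print(compare(current, baseline))          # e.g. {'acc': True, 'mcc': True}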