acquisition_function.py
"""
Module containing acquisition functions for active learning
"""
import numpy as np
from scipy.stats import norm

def cnp_var(x_context, y_context, x_test, model):
    """
    Suggests samples by maximising the predicted uncertainty (no exploitation, full exploration)
    :param x_context: input features of the observed context points
    :param y_context: target values of the observed context points
    :param x_test: input features of the candidate points to be scored
    :param model: CNP model
    :return: predicted variance at each test point
    """
    _, _, predict_test_var = model.predict(x_context, y_context, x_test)
    return np.squeeze(predict_test_var.data.numpy())

def gp_var(X, model):
    """
    Suggests samples by maximising the predicted uncertainty (no exploitation, full exploration)
    :param X: input features of the candidate points to be scored
    :param model: GPflow model
    :return: predicted variance at each input point
    """
    _, y_var = model.predict_f(X)
    return y_var.numpy().flatten()

def gp_ei(X_test, y_train, model, xi=0.01):
    """
    Suggests samples by maximising the predicted expected improvement (EI)
    Balances exploitation and exploration via the parameter xi
    :param X_test: input features of the candidate points to be scored
    :param y_train: target values of the observed samples
    :param model: GPflow model
    :param xi: controls the explore/exploit balance (larger xi favours exploration)
    :return: EI scores
    """
    y_pred, y_var = model.predict_y(X_test)
    # Convert the TensorFlow outputs to NumPy arrays (as in gp_var and gp_greed)
    y_pred, y_var = y_pred.numpy(), y_var.numpy()
    y_best = np.amax(y_train)  # Best sample so far
    y_std = np.sqrt(y_var)
    with np.errstate(divide='warn'):
        # EI = (mu - y_best - xi) * Phi(Z) + sigma * phi(Z), where Z = (mu - y_best - xi) / sigma
        imp = y_pred - y_best - xi
        Z = imp / y_std
        ei = imp * norm.cdf(Z) + y_std * norm.pdf(Z)
        # Optionally suppress points with (near-)zero predicted variance:
        # ei[y_var < 1e-8] = 0.0
    return ei.flatten()

def gp_greed(X, model):
    """
    Suggests samples by maximising the predicted mean (full exploitation, no exploration)
    :param X: input features of the candidate points to be scored
    :param model: GPflow model
    :return: predicted mean at each input point
    """
    y_pred, _ = model.predict_y(X)
    return y_pred.numpy().flatten()
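

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes
# GPflow 2.x is installed and uses a GPR surrogate; the toy objective, kernel
# choice and candidate pool below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import gpflow

    # A handful of initial observations of a toy 1-D objective
    rng = np.random.default_rng(0)
    X_train = rng.uniform(0.0, 1.0, size=(5, 1))
    y_train = np.sin(6.0 * X_train) + 0.1 * rng.standard_normal((5, 1))

    # Fit a GP regression surrogate to the observations
    model = gpflow.models.GPR(
        data=(X_train, y_train), kernel=gpflow.kernels.SquaredExponential()
    )
    gpflow.optimizers.Scipy().minimize(model.training_loss, model.trainable_variables)

    # Score a pool of candidate points and pick the one with the highest EI
    X_pool = np.linspace(0.0, 1.0, 100).reshape(-1, 1)
    ei_scores = gp_ei(X_pool, y_train, model, xi=0.01)
    x_next = X_pool[np.argmax(ei_scores)]
    print("Next point to sample:", x_next)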