From b515962f68c096ba333830ffab073395396ec4a1 Mon Sep 17 00:00:00 2001
From: Marcus
Date: Fri, 7 Jun 2024 17:04:53 -0700
Subject: [PATCH] minor

---
 fvgp/fvgp.py         |  6 ++---
 fvgp/gp.py           | 54 +++++++++++++++++++++++---------------------
 fvgp/gp_data.py      |  6 ++---
 fvgp/gp_posterior.py |  6 ++---
 fvgp/gp_prior.py     |  4 ++--
 5 files changed, 39 insertions(+), 37 deletions(-)

diff --git a/fvgp/fvgp.py b/fvgp/fvgp.py
index 1d65696..c3b4196 100755
--- a/fvgp/fvgp.py
+++ b/fvgp/fvgp.py
@@ -5,7 +5,7 @@ class fvGP(GP):
     """
-    This class provides all the tools for a multitask Gaussian Process (GP).
+    This class provides all the tools for a multi-task Gaussian Process (GP).
     This class allows for full HPC support for training. After initialization, this
     class provides all the methods described for the GP class.
@@ -20,7 +20,7 @@ class provides all the methods described for the GP class.
     N ... arbitrary integers (N1, N2,...)

-    The main logic of fvGP is that any multitask GP is just a single-task GP
+    The main logic of fvGP is that any multi-task GP is just a single-task GP
     over a Cartesian product space of input and output space, as long as the kernel
     is flexible enough, so prepare to work on your kernel. This is the best
     way to give the user optimal control and power. In the
@@ -35,7 +35,7 @@ class provides all the methods described for the GP class.
     [0.2, 0.3,1],[0.9,0.6,1]]

-    This has to be understood and taken into account when customizing fvGP for multitask
+    This has to be understood and taken into account when customizing fvGP for multi-task
     use. The examples will provide deeper insight.

     Parameters
     ----------
diff --git a/fvgp/gp.py b/fvgp/gp.py
index 07397a6..cf4fc9c 100755
--- a/fvgp/gp.py
+++ b/fvgp/gp.py
@@ -23,7 +23,7 @@ class GP:
     """
     This class provides all the tools for a single-task Gaussian Process (GP).
-    Use fvGP for multitask GPs. However, the fvGP class inherits all methods from this class.
+    Use fvGP for multi-task GPs. However, the fvGP class inherits all methods from this class.
     This class allows for full HPC support for training via the HGDL package.

     V ... number of input points
@@ -36,7 +36,9 @@ class GP:
     Parameters
     ----------
     x_data : np.ndarray or list of tuples
-        The input point positions. Shape (V x D), where D is the `input_space_dim`.
+        The input point positions. Shape (V x D), where D is the `index_set_dim`.
+        For single-task GPs, the index set dimension = input space dimension.
+        For multi-task GPs, the index set dimension = input + output space dimensions.
         If dealing with non-Euclidean inputs
         x_data should be a list, not a numpy array.
     y_data : np.ndarray
@@ -229,7 +231,7 @@ def __init__(
                 "You have provided callables for kernel, mean, or noise functions but no"
                 "initial hyperparameters.")
         else:
-            if init_hyperparameters is None: hyperparameters = np.ones((self.data.input_space_dim + 1))
+            if init_hyperparameters is None: hyperparameters = np.ones((self.data.index_set_dim + 1))
             else: hyperparameters = init_hyperparameters
@@ -256,7 +258,7 @@ def __init__(
         ########################################
         ###init prior instance##################
         ########################################
-        self.prior = GPprior(self.data.input_space_dim,
+        self.prior = GPprior(self.data.index_set_dim,
                              self.data.x_data,
                              self.data.Euclidean,
                              hyperparameters=hyperparameters,
@@ -329,7 +331,7 @@ def update_gp_data(
         Parameters
         ----------
         x_new : np.ndarray
-            The point positions. Shape (V x D), where D is the `input_space_dim`.
+            The point positions. Shape (V x D), where D is the `index_set_dim`.
         y_new : np.ndarray
             The values of the data points. Shape (V,1) or (V).
         noise_variances_new : np.ndarray, optional
@@ -374,12 +376,12 @@ def _get_default_hyperparameter_bounds(self):
         """
         if not self.data.Euclidean: raise Exception("Please provide custom hyperparameter bounds to "
                                                     "the training in the non-Euclidean setting")
-        if len(self.prior.hyperparameters) != self.data.input_space_dim + 1:
+        if len(self.prior.hyperparameters) != self.data.index_set_dim + 1:
             raise Exception("Please provide custom hyperparameter_bounds when kernel, mean or noise"
                             " functions are customized")
-        hyperparameter_bounds = np.zeros((self.data.input_space_dim + 1, 2))
+        hyperparameter_bounds = np.zeros((self.data.index_set_dim + 1, 2))
         hyperparameter_bounds[0] = np.array([np.var(self.data.y_data) / 100., np.var(self.data.y_data) * 10.])
-        for i in range(self.data.input_space_dim):
+        for i in range(self.data.index_set_dim):
             range_xi = np.max(self.data.x_data[:, i]) - np.min(self.data.x_data[:, i])
             hyperparameter_bounds[i + 1] = np.array([range_xi / 100., range_xi * 10.])
         return hyperparameter_bounds
@@ -760,7 +762,7 @@ def posterior_mean(self, x_pred, hyperparameters=None, x_out=None):
            a constraint during training. The default is None which means the initialized
            or trained hyperparameters are used.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -785,7 +787,7 @@ def posterior_mean_grad(self, x_pred, hyperparameters=None, x_out=None, directio
            a constraint during training. The default is None which means the initialized
            or trained hyperparameters are used.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
        direction : int, optional
@@ -809,7 +811,7 @@ def posterior_covariance(self, x_pred, x_out=None, variance_only=False, add_nois
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
        variance_only : bool, optional
@@ -836,7 +838,7 @@ def posterior_covariance_grad(self, x_pred, x_out=None, direction=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
        direction : int, optional
@@ -859,7 +861,7 @@ def joint_gp_prior(self, x_pred, x_out=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -883,7 +885,7 @@ def joint_gp_prior_grad(self, x_pred, direction, x_out=None):
        direction : int
            Direction of derivative.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -920,7 +922,7 @@ def gp_entropy(self, x_pred, x_out=None):
        x_pred : np.ndarray
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
-            Output coordinates in case of multitask GP use; a numpy array of size (N x L),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N x L),
            where N is the number of output points, and L is the dimensionality of
            the output space.
@@ -943,7 +945,7 @@ def gp_entropy_grad(self, x_pred, direction, x_out=None):
        direction : int
            Direction of derivative.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1001,7 +1003,7 @@ def gp_kl_div(self, x_pred, comp_mean, comp_cov, x_out=None):
        comp_cov : np.ndarray
            Comparison covariance matrix for KL divergence. shape(comp_cov) = (len(x_pred),len(x_pred))
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1028,7 +1030,7 @@ def gp_kl_div_grad(self, x_pred, comp_mean, comp_cov, direction, x_out=None):
        direction: int
            The direction in which the gradient will be computed.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1073,7 +1075,7 @@ def gp_mutual_information(self, x_pred, x_out=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1100,7 +1102,7 @@ def gp_total_correlation(self, x_pred, x_out=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1125,7 +1127,7 @@ def gp_relative_information_entropy(self, x_pred, x_out=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1152,7 +1154,7 @@ def gp_relative_information_entropy_set(self, x_pred, x_out=None):
            A numpy array of shape (V x D), interpreted as an array of input point positions or a list for
            GPs on non-Euclidean input spaces.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1179,7 +1181,7 @@ def posterior_probability(self, x_pred, comp_mean, comp_cov, x_out=None):
        comp_cov: np.nparray
            Covariance matrix, in R^{len(x_pred) times len(x_pred)}
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1207,7 +1209,7 @@ def posterior_probability_grad(self, x_pred, comp_mean, comp_cov, direction, x_o
        direction : int
            The direction to compute the gradient in.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N),
            where N is the number evaluation points in the output direction.
            Usually this is np.ndarray([0,1,2,...]).
@@ -1231,7 +1233,7 @@ def _crps_s(self, x, mu, sigma):
     def crps(self, x_test, y_test):
        """
        This function calculates the continuous rank probability score.
-        Note that in the multitask setting the user should perform their
+        Note that in the multi-task setting the user should perform their
        input point transformation beforehand.

        Parameters
@@ -1254,7 +1256,7 @@ def crps(self, x_test, y_test):
     def rmse(self, x_test, y_test):
        """
        This function calculates the root mean squared error.
-        Note that in the multitask setting the user should perform their
+        Note that in the multi-task setting the user should perform their
        input point transformation beforehand.

        Parameters
diff --git a/fvgp/gp_data.py b/fvgp/gp_data.py
index 6e94ebb..82d5c68 100755
--- a/fvgp/gp_data.py
+++ b/fvgp/gp_data.py
@@ -14,10 +14,10 @@ def __init__(self, x_data, y_data, noise_variances=None):
         # analyse data
         if isinstance(x_data, np.ndarray):
             assert np.ndim(x_data) == 2
-            self.input_space_dim = len(x_data[0])
+            self.index_set_dim = len(x_data[0])
             self.Euclidean = True
         if isinstance(x_data, list):
-            self.input_space_dim = 1
+            self.index_set_dim = 1
             self.Euclidean = False

         self.x_data = x_data
@@ -34,7 +34,7 @@ def update(self, x_data_new, y_data_new, noise_variances_new=None, append=True):
         if self.Euclidean: assert isinstance(x_data_new, np.ndarray) and np.ndim(x_data_new) == 2
         else: assert (isinstance(x_data_new, list) and np.ndim(x_data_new) == 2 and
-                      self.input_space_dim == x_data_new.shape[1])
+                      self.index_set_dim == x_data_new.shape[1])

         if self.noise_variances is not None and noise_variances_new is None:
             raise Exception("Please provide noise_variances in the data update because you did at initialization "
diff --git a/fvgp/gp_posterior.py b/fvgp/gp_posterior.py
index c2ebf5b..f164b7f 100755
--- a/fvgp/gp_posterior.py
+++ b/fvgp/gp_posterior.py
@@ -246,7 +246,7 @@ def gp_entropy(self, x_pred, x_out=None):
        x_pred : np.ndarray
            A numpy array of shape (V x D), interpreted as an array of input point positions.
        x_out : np.ndarray, optional
-            Output coordinates in case of multitask GP use; a numpy array of size (N x L),
+            Output coordinates in case of multi-task GP use; a numpy array of size (N x L),
            where N is the number of output points, and L is the dimensionality of
            the output space.
@@ -440,9 +440,9 @@ def _perform_input_checks(self, x_pred, x_out):
         if isinstance(x_pred, np.ndarray):
             assert np.ndim(x_pred) == 2
             if isinstance(x_out, np.ndarray):
-                assert x_pred.shape[1] == self.data_obj.input_space_dim - 1
+                assert x_pred.shape[1] == self.data_obj.index_set_dim - 1
             else:
-                assert x_pred.shape[1] == self.data_obj.input_space_dim
+                assert x_pred.shape[1] == self.data_obj.index_set_dim

         assert isinstance(x_out, np.ndarray) or x_out is None
         if isinstance(x_out, np.ndarray): assert np.ndim(x_out) == 1
diff --git a/fvgp/gp_prior.py b/fvgp/gp_prior.py
index d7a759b..a4b0e61 100755
--- a/fvgp/gp_prior.py
+++ b/fvgp/gp_prior.py
@@ -10,7 +10,7 @@ class GPprior:
     def __init__(self,
-                 input_space_dim,
+                 index_set_dim,
                  x_data,
                  Euclidean,
                  gp_kernel_function=None,
@@ -34,7 +34,7 @@ def __init__(self,
         assert isinstance(cov_comp_mode, str)
         assert isinstance(constant_mean, float)

-        self.input_space_dim = input_space_dim
+        self.index_set_dim = index_set_dim
         self.Euclidean = Euclidean
         self.gp_kernel_function = gp_kernel_function
         self.gp_mean_function = gp_mean_function
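Note (not part of the patch): the docstring lines added above describe fvGP's index-set view, in which a multi-task GP over D-dimensional inputs and several tasks is treated as a single-task GP whose points carry the task index as an extra coordinate, so index_set_dim = D + 1. The sketch below is a hypothetical illustration only; the helper name to_index_set is invented here, and fvGP performs this transformation internally.

    import numpy as np

    # Illustrative only: build the Cartesian-product "index set" the updated
    # docstrings refer to. Each input point is repeated once per task and the
    # task label is appended as a final coordinate.
    def to_index_set(x_data, n_tasks):
        x_rep = np.tile(x_data, (n_tasks, 1))                       # inputs repeated per task
        task = np.repeat(np.arange(n_tasks), len(x_data))[:, None]  # 0,0,...,1,1,... task labels
        return np.hstack([x_rep, task])                             # shape (V * n_tasks, D + 1)

    x = np.array([[0.2, 0.3], [0.9, 0.6]])   # V = 2 points, D = 2 input dimensions
    print(to_index_set(x, n_tasks=2))
    # [[0.2 0.3 0. ]
    #  [0.9 0.6 0. ]
    #  [0.2 0.3 1. ]
    #  [0.9 0.6 1. ]]   -> index_set_dim = 3, matching the example in the fvGP class docstring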