diff --git a/.github/workflows/quality-check.yaml b/.github/workflows/quality-check.yaml
index 5eb76654..4f62c303 100644
--- a/.github/workflows/quality-check.yaml
+++ b/.github/workflows/quality-check.yaml
@@ -22,7 +22,7 @@ jobs:
     strategy:
       matrix:
         python-version: ["3.7", "3.8", "3.9", "3.10"]
-        tensorflow: ["~=2.5.0", "~=2.6.0", "~=2.7.0", "~=2.8.0", "~=2.9.0", "~=2.10.0", "~=2.11.0", "~=2.12.0", "~=2.13.0", "~=2.14.0", "~=2.15.0"]
+        tensorflow: ["~=2.5.0", "~=2.6.0", "~=2.7.0", "~=2.8.0", "~=2.9.0", "~=2.10.0", "~=2.11.0", "~=2.12.0", "~=2.13.0", "~=2.14.0", "~=2.15.0", "~=2.16.0"]
         include:
           - tensorflow: "~=2.5.0"
             keras: "~=2.6.0"
@@ -57,6 +57,9 @@ jobs:
          - tensorflow: "~=2.15.0"
            keras: "~=2.15.0"
            tensorflow-probability: "~=0.23.0"
+          - tensorflow: "~=2.16.0"
+            keras: "~=2.16.0"
+            tensorflow-probability: "~=0.24.0"
         exclude:
          # These older versions of TensorFlow don't work with Python 3.10:
          - python-version: "3.10"
            tensorflow: "~=2.5.0"
@@ -74,11 +77,15 @@
            tensorflow: "~=2.14.0"
          - python-version: "3.7"
            tensorflow: "~=2.15.0"
+          - python-version: "3.7"
+            tensorflow: "~=2.16.0"
          # These newer versions of TensorFlow don't work with Python 3.8:
          - python-version: "3.8"
            tensorflow: "~=2.14.0"
          - python-version: "3.8"
            tensorflow: "~=2.15.0"
+          - python-version: "3.8"
+            tensorflow: "~=2.16.0"

    name: Python-${{ matrix.python-version }} tensorflow${{ matrix.tensorflow }}
    env:
diff --git a/benchmarking/main.py b/benchmarking/main.py
index 99cdf0d6..a88718c0 100644
--- a/benchmarking/main.py
+++ b/benchmarking/main.py
@@ -23,6 +23,7 @@
 import tensorflow as tf
 from bayesian_benchmarks import data as uci_datasets
 from bayesian_benchmarks.data import Dataset
+from gpflow.keras import tf_keras
 from sacred import Experiment
 from scipy.stats import norm
 from utils import ExperimentName, git_version
@@ -89,10 +90,10 @@ def build_model(X, num_inducing, num_layers):


 @EXPERIMENT.capture
-def train_model(model: tf.keras.models.Model, data_train, batch_size, num_epochs):
+def train_model(model: tf_keras.models.Model, data_train, batch_size, num_epochs):
     X_train, Y_train = data_train
     callbacks = [
-        tf.keras.callbacks.ReduceLROnPlateau(
+        tf_keras.callbacks.ReduceLROnPlateau(
             'loss', factor=0.95, patience=3, min_lr=1e-6, verbose=1
         ),
     ]
diff --git a/docs/notebooks/efficient_sampling.py b/docs/notebooks/efficient_sampling.py
index 5b6a6da5..a012903c 100644
--- a/docs/notebooks/efficient_sampling.py
+++ b/docs/notebooks/efficient_sampling.py
@@ -35,6 +35,7 @@
 import gpflux

 from gpflow.config import default_float
+from gpflow.keras import tf_keras
 from gpflux.layers.basis_functions.fourier_features import RandomFourierFeaturesCosine
 from gpflux.sampling import KernelWithFeatureDecomposition

@@ -102,7 +103,7 @@
 model.compile(tf.optimizers.Adam(learning_rate=0.1))

 callbacks = [
-    tf.keras.callbacks.ReduceLROnPlateau(
+    tf_keras.callbacks.ReduceLROnPlateau(
         monitor="loss",
         patience=5,
         factor=0.95,
diff --git a/docs/notebooks/gpflux_features.py b/docs/notebooks/gpflux_features.py
index 909ea7fd..62709368 100644
--- a/docs/notebooks/gpflux_features.py
+++ b/docs/notebooks/gpflux_features.py
@@ -63,6 +63,7 @@ def motorcycle_data():


 # %%
 import gpflux
+from gpflow.keras import tf_keras
 from gpflux.architectures import Config, build_constant_input_dim_deep_gp
 from gpflux.models import DeepGP
@@ -80,7 +81,7 @@ def motorcycle_data():

 # %%
 # From the `DeepGP` model we instantiate a training model which is a `tf.keras.Model`
-training_model: tf.keras.Model = deep_gp.as_training_model()
+training_model: tf_keras.Model = deep_gp.as_training_model()

 # Following the Keras procedure we need to compile and pass a optimizer,
 # before fitting the model to data
@@ -88,11 +89,11 @@ def motorcycle_data():

 callbacks = [
     # Create callback that reduces the learning rate every time the ELBO plateaus
-    tf.keras.callbacks.ReduceLROnPlateau("loss", factor=0.95, patience=3, min_lr=1e-6, verbose=0),
+    tf_keras.callbacks.ReduceLROnPlateau("loss", factor=0.95, patience=3, min_lr=1e-6, verbose=0),
     # Create a callback that writes logs (e.g., hyperparameters, KLs, etc.) to TensorBoard
     gpflux.callbacks.TensorBoard(),
     # Create a callback that saves the model's weights
-    tf.keras.callbacks.ModelCheckpoint(filepath="ckpts/", save_weights_only=True, verbose=0),
+    tf_keras.callbacks.ModelCheckpoint(filepath="ckpts/", save_weights_only=True, verbose=0),
 ]

 history = training_model.fit(
diff --git a/docs/notebooks/gpflux_with_keras_layers.py b/docs/notebooks/gpflux_with_keras_layers.py
index b373b720..341d4da5 100644
--- a/docs/notebooks/gpflux_with_keras_layers.py
+++ b/docs/notebooks/gpflux_with_keras_layers.py
@@ -29,6 +29,7 @@

 import gpflux
 from gpflow.config import default_float
+from gpflow.keras import tf_keras


 # %% [markdown]
@@ -78,11 +79,11 @@
 likelihood_container = gpflux.layers.TrackableLayer()
 likelihood_container.likelihood = likelihood

-model = tf.keras.Sequential(
+model = tf_keras.Sequential(
     [
-        tf.keras.layers.Dense(100, activation="relu"),
-        tf.keras.layers.Dense(100, activation="relu"),
-        tf.keras.layers.Dense(1, activation="linear"),
+        tf_keras.layers.Dense(100, activation="relu"),
+        tf_keras.layers.Dense(100, activation="relu"),
+        tf_keras.layers.Dense(1, activation="linear"),
         gp_layer,
         likelihood_container,  # no-op, for discovering trainable likelihood parameters
     ]
diff --git a/docs/notebooks/keras_integration.py b/docs/notebooks/keras_integration.py
index 80083b66..6ef111b2 100644
--- a/docs/notebooks/keras_integration.py
+++ b/docs/notebooks/keras_integration.py
@@ -25,6 +25,7 @@
 import gpflow
 import gpflux
 from gpflow.ci_utils import reduce_in_tests
+from gpflow.keras import tf_keras

 import matplotlib.pyplot as plt

@@ -83,10 +84,10 @@ def create_model(model_class):
 num_epochs = reduce_in_tests(200)

 # %%
-dgp = create_model(tf.keras.Model)
+dgp = create_model(tf_keras.Model)

 callbacks = [
-    tf.keras.callbacks.ReduceLROnPlateau(
+    tf_keras.callbacks.ReduceLROnPlateau(
         monitor="loss",
         patience=5,
         factor=0.95,
@@ -106,7 +107,7 @@ def create_model(model_class):
 dgp_natgrad = create_model(gpflux.optimization.NatGradModel)

 callbacks = [
-    tf.keras.callbacks.ReduceLROnPlateau(
+    tf_keras.callbacks.ReduceLROnPlateau(
         monitor="loss",
         patience=5,
         factor=0.95,
diff --git a/gpflux/callbacks.py b/gpflux/callbacks.py
index 1ce31776..1da15cb3 100644
--- a/gpflux/callbacks.py
+++ b/gpflux/callbacks.py
@@ -21,15 +21,14 @@
 import re
 from typing import Any, Dict, List, Mapping, Optional, Union

-import tensorflow as tf
-
 import gpflow
+from gpflow.keras import tf_keras
 from gpflow.utilities import parameter_dict

 __all__ = ["TensorBoard"]


-class TensorBoard(tf.keras.callbacks.TensorBoard):
+class TensorBoard(tf_keras.callbacks.TensorBoard):
     """
     This class is a thin wrapper around a `tf.keras.callbacks.TensorBoard` callback that also
     calls GPflow's `gpflow.monitor.ModelToTensorBoard` monitoring task.
@@ -100,7 +99,7 @@ def __init__(
         self.keywords_to_monitor = keywords_to_monitor
         self.max_size = max_size

-    def set_model(self, model: tf.keras.Model) -> None:
+    def set_model(self, model: tf_keras.Model) -> None:
         """
         Set the model (extends the Keras `set_model
         `_
diff --git a/gpflux/layers/basis_functions/fourier_features/base.py b/gpflux/layers/basis_functions/fourier_features/base.py
index 6c2bc779..0efce2b9 100644
--- a/gpflux/layers/basis_functions/fourier_features/base.py
+++ b/gpflux/layers/basis_functions/fourier_features/base.py
@@ -22,11 +22,12 @@

 import gpflow
 from gpflow.base import TensorType
+from gpflow.keras import tf_keras

 from gpflux.types import ShapeType


-class FourierFeaturesBase(ABC, tf.keras.layers.Layer):
+class FourierFeaturesBase(ABC, tf_keras.layers.Layer):
     r"""
     The base class for all Fourier feature layers, used for both random Fourier feature layers and
     quadrature layers. We subclass :class:`tf.keras.layers.Layer`, so we must provide
diff --git a/gpflux/layers/latent_variable_layer.py b/gpflux/layers/latent_variable_layer.py
index b8e16d59..961cbbc1 100644
--- a/gpflux/layers/latent_variable_layer.py
+++ b/gpflux/layers/latent_variable_layer.py
@@ -23,6 +23,7 @@

 from gpflow import default_float
 from gpflow.base import TensorType
+from gpflow.keras import tf_keras

 from gpflux.layers.trackable_layer import TrackableLayer
 from gpflux.types import ObservationType
@@ -67,14 +68,14 @@ class LatentVariableLayer(LayerWithObservations):
     prior: tfp.distributions.Distribution
     """ The prior distribution for the latent variables. """

-    encoder: tf.keras.layers.Layer
+    encoder: tf_keras.layers.Layer
     """
     An encoder that maps from a concatenation of inputs and targets to the parameters of
     the approximate posterior distribution of the corresponding latent variables.
     """

-    compositor: tf.keras.layers.Layer
+    compositor: tf_keras.layers.Layer
     """
     A layer that takes as input the two-element ``[layer_inputs, latent_variable_samples]``
     list and combines the elements into a single output tensor.
     """
@@ -83,8 +84,8 @@
     def __init__(
         self,
         prior: tfp.distributions.Distribution,
-        encoder: tf.keras.layers.Layer,
-        compositor: Optional[tf.keras.layers.Layer] = None,
+        encoder: tf_keras.layers.Layer,
+        compositor: Optional[tf_keras.layers.Layer] = None,
         name: Optional[str] = None,
     ):
         """
@@ -108,7 +109,7 @@ def __init__(
         self.compositor = (
             compositor
             if compositor is not None
-            else tf.keras.layers.Concatenate(axis=-1, dtype=default_float())
+            else tf_keras.layers.Concatenate(axis=-1, dtype=default_float())
         )

     def call(
diff --git a/gpflux/layers/trackable_layer.py b/gpflux/layers/trackable_layer.py
index e4a5d8af..86c8d073 100644
--- a/gpflux/layers/trackable_layer.py
+++ b/gpflux/layers/trackable_layer.py
@@ -15,8 +15,8 @@
 #
 """Utility layer that tracks variables in :class:`tf.Module`."""

-import tensorflow as tf
 from deprecated import deprecated
+from gpflow.keras import tf_keras


 @deprecated(
@@ -27,7 +27,7 @@
         "be removed in GPflux version `1.0.0`."
     )
 )
-class TrackableLayer(tf.keras.layers.Layer):
+class TrackableLayer(tf_keras.layers.Layer):
     """
     With the release of TensorFlow 2.5, our TrackableLayer workaround is no
     longer needed. See https://github.com/Prowler-io/gpflux/issues/189.
diff --git a/gpflux/losses.py b/gpflux/losses.py
index ea590256..c43a7b4b 100644
--- a/gpflux/losses.py
+++ b/gpflux/losses.py
@@ -25,11 +25,12 @@

 import gpflow
 from gpflow.base import TensorType
+from gpflow.keras import tf_keras

 from gpflux.types import unwrap_dist


-class LikelihoodLoss(tf.keras.losses.Loss):
+class LikelihoodLoss(tf_keras.losses.Loss):
     r"""
     This class is a `tf.keras.losses.Loss` implementation that wraps a GPflow
     :class:`~gpflow.likelihoods.Likelihood` instance.
diff --git a/gpflux/models/deep_gp.py b/gpflux/models/deep_gp.py
index 529e2ca7..a0aae099 100644
--- a/gpflux/models/deep_gp.py
+++ b/gpflux/models/deep_gp.py
@@ -22,6 +22,7 @@

 import gpflow
 from gpflow.base import Module, TensorType
+from gpflow.keras import tf_keras

 import gpflux
 from gpflux.layers import LayerWithObservations, LikelihoodLayer
@@ -46,16 +47,16 @@ class DeepGP(Module):
     for more details.
     """

-    inputs: tf.keras.Input
-    targets: tf.keras.Input
+    inputs: tf_keras.Input
+    targets: tf_keras.Input

-    f_layers: List[tf.keras.layers.Layer]
+    f_layers: List[tf_keras.layers.Layer]
     """ A list of all layers in this DeepGP (just :attr:`likelihood_layer` is separate). """

     likelihood_layer: gpflux.layers.LikelihoodLayer
     """ The likelihood layer. """

-    default_model_class: Type[tf.keras.Model]
+    default_model_class: Type[tf_keras.Model]
     """
     The default for the *model_class* argument of :meth:`as_training_model` and
     :meth:`as_prediction_model`. This must have the same semantics as `tf.keras.Model`,
@@ -73,14 +74,14 @@ class DeepGP(Module):

     def __init__(
         self,
-        f_layers: List[tf.keras.layers.Layer],
+        f_layers: List[tf_keras.layers.Layer],
         likelihood: Union[
             gpflux.layers.LikelihoodLayer, gpflow.likelihoods.Likelihood
         ],  # fully-qualified for autoapi
         *,
         input_dim: Optional[int] = None,
         target_dim: Optional[int] = None,
-        default_model_class: Type[tf.keras.Model] = tf.keras.Model,
+        default_model_class: Type[tf_keras.Model] = tf_keras.Model,
         num_data: Optional[int] = None,
     ):
         """
@@ -99,8 +100,8 @@ def __init__(
             If you do not specify a value for this parameter explicitly, it is
             automatically detected from the :attr:`~gpflux.layers.GPLayer.num_data` attribute in the GP layers.
         """
-        self.inputs = tf.keras.Input((input_dim,), dtype=gpflow.default_float(), name="inputs")
-        self.targets = tf.keras.Input((target_dim,), dtype=gpflow.default_float(), name="targets")
+        self.inputs = tf_keras.Input((input_dim,), dtype=gpflow.default_float(), name="inputs")
+        self.targets = tf_keras.Input((target_dim,), dtype=gpflow.default_float(), name="targets")
         self.f_layers = f_layers
         if isinstance(likelihood, gpflow.likelihoods.Likelihood):
             self.likelihood_layer = LikelihoodLayer(likelihood)
@@ -111,7 +112,7 @@ def __init__(

     @staticmethod
     def _validate_num_data(
-        f_layers: List[tf.keras.layers.Layer], num_data: Optional[int] = None
+        f_layers: List[tf_keras.layers.Layer], num_data: Optional[int] = None
     ) -> int:
         """
         Check that the :attr:`~gpflux.layers.gp_layer.GPLayer.num_data`
@@ -229,15 +230,15 @@ def elbo(self, data: Tuple[TensorType, TensorType]) -> tf.Tensor:
         ]
         return -tf.reduce_sum(all_losses) * self.num_data

-    def _get_model_class(self, model_class: Optional[Type[tf.keras.Model]]) -> Type[tf.keras.Model]:
+    def _get_model_class(self, model_class: Optional[Type[tf_keras.Model]]) -> Type[tf_keras.Model]:
         if model_class is not None:
             return model_class
         else:
             return self.default_model_class

     def as_training_model(
-        self, model_class: Optional[Type[tf.keras.Model]] = None
-    ) -> tf.keras.Model:
+        self, model_class: Optional[Type[tf_keras.Model]] = None
+    ) -> tf_keras.Model:
         r"""
         Construct a `tf.keras.Model` instance that requires you to provide both ``inputs``
         and ``targets`` to its call. This information is required for
@@ -269,8 +270,8 @@ def as_training_model(
         return model_class([self.inputs, self.targets], outputs)

     def as_prediction_model(
-        self, model_class: Optional[Type[tf.keras.Model]] = None
-    ) -> tf.keras.Model:
+        self, model_class: Optional[Type[tf_keras.Model]] = None
+    ) -> tf_keras.Model:
         """
         Construct a `tf.keras.Model` instance that requires only ``inputs``,
         which means you do not have to provide dummy target values when
diff --git a/gpflux/optimization/keras_natgrad.py b/gpflux/optimization/keras_natgrad.py
index b009922f..4bdd5781 100644
--- a/gpflux/optimization/keras_natgrad.py
+++ b/gpflux/optimization/keras_natgrad.py
@@ -24,6 +24,7 @@

 import gpflow
 from gpflow import Parameter
+from gpflow.keras import tf_keras
 from gpflow.models.model import MeanAndVariance
 from gpflow.optimizers import NaturalGradient
@@ -35,7 +36,7 @@
 ]


-class NatGradModel(tf.keras.Model):
+class NatGradModel(tf_keras.Model):
     r"""
     This is a drop-in replacement for `tf.keras.Model` when constructing GPflux
     models using the functional Keras style, to make it work with the
@@ -228,7 +229,7 @@ class NatGradWrapper(NatGradModel):
     This class will probably be removed in the future.
     """

-    def __init__(self, base_model: tf.keras.Model, *args: Any, **kwargs: Any):
+    def __init__(self, base_model: tf_keras.Model, *args: Any, **kwargs: Any):
         """
         :param base_model: the class-based Keras model to be wrapped
         """
@@ -236,7 +237,7 @@ def __init__(self, base_model: tf.keras.Model, *args: Any, **kwargs: Any):
         self.base_model = base_model

     @property
-    def layers(self) -> List[tf.keras.layers.Layer]:
+    def layers(self) -> List[tf_keras.layers.Layer]:
         if not hasattr(self, "base_model"):
             # required for super().__init__(), in which base_model has not been set yet
             return super().layers
diff --git a/gpflux/sampling/kernel_with_feature_decomposition.py b/gpflux/sampling/kernel_with_feature_decomposition.py
index bad1f80c..685598c5 100644
--- a/gpflux/sampling/kernel_with_feature_decomposition.py
+++ b/gpflux/sampling/kernel_with_feature_decomposition.py
@@ -34,6 +34,7 @@

 import gpflow
 from gpflow.base import TensorType
+from gpflow.keras import tf_keras

 NoneType = type(None)

@@ -51,7 +52,7 @@ class _ApproximateKernel(gpflow.kernels.Kernel):

     def __init__(
         self,
-        feature_functions: tf.keras.layers.Layer,
+        feature_functions: tf_keras.layers.Layer,
         feature_coefficients: TensorType,
     ):
         r"""
@@ -128,7 +129,7 @@ class KernelWithFeatureDecomposition(gpflow.kernels.Kernel):
     def __init__(
         self,
         kernel: Union[gpflow.kernels.Kernel, NoneType],
-        feature_functions: tf.keras.layers.Layer,
+        feature_functions: tf_keras.layers.Layer,
         feature_coefficients: TensorType,
     ):
         r"""
@@ -161,7 +162,7 @@ def __init__(
         tf.ensure_shape(self._feature_coefficients, tf.TensorShape([None, 1]))

     @property
-    def feature_functions(self) -> tf.keras.layers.Layer:
+    def feature_functions(self) -> tf_keras.layers.Layer:
         r"""Return the kernel's features :math:`\phi_i(\cdot)`."""
         return self._feature_functions

diff --git a/gpflux/version.py b/gpflux/version.py
index 5c5710ac..ff152793 100644
--- a/gpflux/version.py
+++ b/gpflux/version.py
@@ -15,4 +15,4 @@
 #
 """Adds __version__"""

-__version__ = "0.4.3"
+__version__ = "0.4.4"
diff --git a/setup.py b/setup.py
index d2cff5c4..e708f16e 100644
--- a/setup.py
+++ b/setup.py
@@ -6,14 +6,13 @@

 requirements = [
     "deprecated",
-    "gpflow>=2.6.3",
+    "gpflow>=2.9.2",
     "numpy",
     "scipy",
-    "tensorflow>=2.5.0,<=2.15.0; platform_system!='Darwin' or platform_machine!='arm64'",
+    "tensorflow>=2.5.0,<=2.16; platform_system!='Darwin' or platform_machine!='arm64'",
     # NOTE: Support of Apple Silicon MacOS platforms is in an experimental mode
-    "tensorflow-macos>=2.5.0,<=2.15.0; platform_system=='Darwin' and platform_machine=='arm64'",
-    # NOTE: once we require tensorflow-probability>=0.12, we can remove our custom deepcopy handling
-    "tensorflow-probability>=0.13.0,<=0.23.0",
+    "tensorflow-macos>=2.5.0,<=2.16; platform_system=='Darwin' and platform_machine=='arm64'",
+    "tensorflow-probability>=0.13.0,<=0.24",
 ]

 with open("README.md", "r") as file:
diff --git a/tests/gpflux/layers/basis_functions/fourier_features/test_quadrature.py b/tests/gpflux/layers/basis_functions/fourier_features/test_quadrature.py
index 9cd33253..efbd5f4f 100644
--- a/tests/gpflux/layers/basis_functions/fourier_features/test_quadrature.py
+++ b/tests/gpflux/layers/basis_functions/fourier_features/test_quadrature.py
@@ -20,6 +20,7 @@
 from tensorflow.python.keras.utils.kernelized_utils import inner_product

 import gpflow
+from gpflow.keras import tf_keras
 from gpflow.quadrature.gauss_hermite import NDiagGHQuadrature
 from gpflow.utilities.ops import difference_matrix

@@ -150,7 +151,7 @@ def test_fourier_features_shapes(n_components, n_dims, batch_size):

 def test_keras_testing_util_layer_test_1D(kernel_cls, batch_size, n_components):
     kernel = kernel_cls()
-    tf.keras.utils.get_custom_objects()["QuadratureFourierFeatures"] = QuadratureFourierFeatures
+    tf_keras.utils.get_custom_objects()["QuadratureFourierFeatures"] = QuadratureFourierFeatures
     layer_test(
         QuadratureFourierFeatures,
         kwargs={
@@ -169,7 +170,7 @@ def test_keras_testing_util_layer_test_1D(kernel_cls, batch_size, n_components):

 def test_keras_testing_util_layer_test_multidim(kernel_cls, batch_size, n_dims, n_components):
     kernel = kernel_cls()
-    tf.keras.utils.get_custom_objects()["QuadratureFourierFeatures"] = QuadratureFourierFeatures
+    tf_keras.utils.get_custom_objects()["QuadratureFourierFeatures"] = QuadratureFourierFeatures
     layer_test(
         QuadratureFourierFeatures,
         kwargs={
diff --git a/tests/gpflux/layers/basis_functions/fourier_features/test_random.py b/tests/gpflux/layers/basis_functions/fourier_features/test_random.py
index fe1717cb..741b371a 100644
--- a/tests/gpflux/layers/basis_functions/fourier_features/test_random.py
+++ b/tests/gpflux/layers/basis_functions/fourier_features/test_random.py
@@ -20,6 +20,7 @@
 from tensorflow.python.keras.utils.kernelized_utils import inner_product

 import gpflow
+from gpflow.keras import tf_keras

 from gpflux.layers.basis_functions.fourier_features.random import (
     OrthogonalRandomFeatures,
@@ -250,7 +251,7 @@ def test_multioutput_fourier_features_shapes(

 def test_keras_testing_util_layer_test_1D(kernel_cls, batch_size, n_components):
     kernel = kernel_cls()
-    tf.keras.utils.get_custom_objects()["RandomFourierFeatures"] = RandomFourierFeatures
+    tf_keras.utils.get_custom_objects()["RandomFourierFeatures"] = RandomFourierFeatures
     layer_test(
         RandomFourierFeatures,
         kwargs={
@@ -269,7 +270,7 @@ def test_keras_testing_util_layer_test_1D(kernel_cls, batch_size, n_components):

 def test_keras_testing_util_layer_test_multidim(kernel_cls, batch_size, n_dims, n_components):
     kernel = kernel_cls()
-    tf.keras.utils.get_custom_objects()["RandomFourierFeatures"] = RandomFourierFeatures
+    tf_keras.utils.get_custom_objects()["RandomFourierFeatures"] = RandomFourierFeatures
     layer_test(
         RandomFourierFeatures,
         kwargs={
diff --git a/tests/gpflux/layers/test_dedup_trackable_layer.py b/tests/gpflux/layers/test_dedup_trackable_layer.py
index 6f110e07..bb1fe690 100644
--- a/tests/gpflux/layers/test_dedup_trackable_layer.py
+++ b/tests/gpflux/layers/test_dedup_trackable_layer.py
@@ -21,6 +21,7 @@
 from tensorflow.python.ops.resource_variable_ops import ResourceVariable

 import gpflow
+from gpflow.keras import tf_keras
 from gpflow.utilities import parameter_dict

 import gpflux
@@ -36,7 +37,7 @@ class CONFIG:
     num_data = 7


-def count_params(model: tf.keras.models.Model) -> int:
+def count_params(model: tf_keras.models.Model) -> int:
     """
     Counts the total number of scalar parameters in a Model.

@@ -54,7 +55,7 @@ def data() -> Tuple[np.ndarray, np.ndarray]:


 @pytest.fixture
-def model(data) -> tf.keras.models.Model:
+def model(data) -> tf_keras.models.Model:
     """
     Builds a two-layer deep GP model.
     """
@@ -69,11 +70,11 @@
     likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.01))

-    X = tf.keras.Input((input_dim,))
+    X = tf_keras.Input((input_dim,))
     f1 = layer1(X)
     f2 = layer2(f1)
     y = likelihood_layer(f2)

-    return tf.keras.Model(inputs=X, outputs=y)
+    return tf_keras.Model(inputs=X, outputs=y)


 def _size_q_sqrt(num_inducing, output_dim):
diff --git a/tests/gpflux/models/test_bayesian_model.py b/tests/gpflux/models/test_bayesian_model.py
index 6ae0c8fb..970ae18f 100644
--- a/tests/gpflux/models/test_bayesian_model.py
+++ b/tests/gpflux/models/test_bayesian_model.py
@@ -19,6 +19,7 @@
 import tensorflow_probability as tfp

 from gpflow import default_float
+from gpflow.keras import tf_keras
 from gpflow.likelihoods import Gaussian

 from gpflux.layers import LatentVariableLayer, LikelihoodLayer
@@ -30,12 +31,12 @@
 def build_latent_layer(w_dim, x_dim, y_dim):

     def build_encoder():
-        inputs = tf.keras.Input((x_dim + y_dim,))
-        x1 = tf.keras.layers.Dense(100)(inputs)
-        x2 = tf.keras.layers.Dense(20)(x1)
-        mean = tf.keras.layers.Dense(w_dim, activation="linear", name="output_mean")(x2)
-        std = tf.keras.layers.Dense(w_dim, activation="softplus", name="output_std")(x2)
-        return tf.keras.Model(inputs=[inputs], outputs=[mean, std])
+        inputs = tf_keras.Input((x_dim + y_dim,))
+        x1 = tf_keras.layers.Dense(100)(inputs)
+        x2 = tf_keras.layers.Dense(20)(x1)
+        mean = tf_keras.layers.Dense(w_dim, activation="linear", name="output_mean")(x2)
+        std = tf_keras.layers.Dense(w_dim, activation="softplus", name="output_std")(x2)
+        return tf_keras.Model(inputs=[inputs], outputs=[mean, std])

     def build_prior():
         mean = np.zeros(w_dim)
diff --git a/tests/gpflux/test_callbacks.py b/tests/gpflux/test_callbacks.py
index 872d4cca..f5aadb79 100644
--- a/tests/gpflux/test_callbacks.py
+++ b/tests/gpflux/test_callbacks.py
@@ -22,6 +22,7 @@
 from packaging.version import Version

 import gpflow
+from gpflow.keras import tf_keras

 import gpflux
 from gpflux.experiment_support.tensorboard import tensorboard_event_iterator
@@ -47,7 +48,7 @@ def data() -> Tuple[np.ndarray, np.ndarray]:


 @pytest.fixture
-def model_and_loss(data) -> Tuple[tf.keras.models.Model, tf.keras.losses.Loss]:
+def model_and_loss(data) -> Tuple[tf_keras.models.Model, tf_keras.losses.Loss]:
     """
     Builds a two-layer deep GP model.
     """
@@ -66,7 +67,7 @@ def model_and_loss(data) -> Tuple[tf.keras.models.Model, tf.keras.losses.Loss]:
     likelihood = gpflow.likelihoods.Gaussian(CONFIG.likelihood_variance)
     gpflow.set_trainable(likelihood.variance, False)

-    X = tf.keras.Input((input_dim,))
+    X = tf_keras.Input((input_dim,))
     f1 = layer1(X)
     f2 = layer2(f1)

@@ -76,7 +77,7 @@ def model_and_loss(data) -> Tuple[tf.keras.models.Model, tf.keras.losses.Loss]:
     y = likelihood_container(f2)
     loss = gpflux.losses.LikelihoodLoss(likelihood)

-    return tf.keras.Model(inputs=X, outputs=y), loss
+    return tf_keras.Model(inputs=X, outputs=y), loss


 @pytest.mark.parametrize("update_freq", ["epoch", "batch"])
@@ -85,11 +86,11 @@ def test_tensorboard_callback(tmp_path, model_and_loss, data, update_freq):
     tmp_path = str(tmp_path)
     dataset = tf.data.Dataset.from_tensor_slices(data).batch(CONFIG.num_data)

-    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
+    optimizer = tf_keras.optimizers.Adam(learning_rate=1e-2)
     model, loss = model_and_loss
     model.compile(optimizer=optimizer, loss=loss)
     callbacks = [
-        tf.keras.callbacks.ReduceLROnPlateau(
+        tf_keras.callbacks.ReduceLROnPlateau(
             monitor="loss",
             patience=1,
             factor=0.95,
diff --git a/tests/integration/test_compilation.py b/tests/integration/test_compilation.py
index c4e44d14..c1bd02d6 100644
--- a/tests/integration/test_compilation.py
+++ b/tests/integration/test_compilation.py
@@ -18,6 +18,7 @@
 import tensorflow as tf
 from tensorflow import keras

+from gpflow.keras import tf_keras
 from gpflow.kernels import RBF
 from gpflow.likelihoods import Gaussian
 from gpflow.mean_functions import Zero
@@ -74,7 +75,7 @@ def build_keras_functional_deep_gp(layer_sizes, num_data):
     container.likelihood = likelihood
     outputs = container(x)  # to track likelihood

-    model = tf.keras.Model(inputs=[inputs], outputs=outputs, name="deep_gp_fp")
+    model = tf_keras.Model(inputs=[inputs], outputs=outputs, name="deep_gp_fp")
     loss = LikelihoodLoss(likelihood)
     return model, loss

@@ -83,7 +84,7 @@ def build_keras_objected_oriented_deep_gp(layer_sizes, num_data):
     gp_layers = build_gp_layers(layer_sizes, num_data)
     likelihood = Gaussian()

-    class KerasDeepGP(tf.keras.Model, TrackableLayer):
+    class KerasDeepGP(tf_keras.Model, TrackableLayer):
         def __init__(self, gp_layers, likelihood):
             super().__init__(name="deep_gp_oop")
             self.gp_layers = gp_layers
@@ -138,7 +139,7 @@ def test_model_compilation(deep_gp_model_builder):

     train_dataset = tf.data.Dataset.from_tensor_slices(dataset).batch(batch)

-    optimizer = tf.keras.optimizers.Adam()
+    optimizer = tf_keras.optimizers.Adam()

     deep_gp_model.compile(optimizer=optimizer, loss=loss)

@@ -182,7 +183,7 @@ def test_model_eager(deep_gp_model_builder, use_tf_function):

     train_dataset = tf.data.Dataset.from_tensor_slices(dataset).repeat().batch(batch)

-    optimizer = tf.keras.optimizers.Adam()
+    optimizer = tf_keras.optimizers.Adam()

     train_dataset_iter = iter(train_dataset)
     test_mini_batch = next(train_dataset_iter)
diff --git a/tests/integration/test_latent_variable_integration.py b/tests/integration/test_latent_variable_integration.py
index 98debf82..df8419fa 100644
--- a/tests/integration/test_latent_variable_integration.py
+++ b/tests/integration/test_latent_variable_integration.py
@@ -18,9 +18,9 @@
 import numpy as np
 import pytest
 import tensorflow as tf
-import tensorflow.keras as keras
 import tensorflow_probability as tfp

+from gpflow.keras import tf_keras
 from gpflow.kernels import RBF
 from gpflow.likelihoods import Gaussian
 from gpflow.mean_functions import Zero
@@ -56,7 +57,7 @@ def train_model(x_data, y_data, model, use_keras_compile):
     dataset_dict = {"inputs": x_data, "targets": y_data}
     num_data = len(x_data)

-    optimizer = tf.keras.optimizers.Adam()
+    optimizer = tf_keras.optimizers.Adam()

     epochs = 20
diff --git a/tests/integration/test_svgp_equivalence.py b/tests/integration/test_svgp_equivalence.py
index ab7d86dc..1e8af087 100644
--- a/tests/integration/test_svgp_equivalence.py
+++ b/tests/integration/test_svgp_equivalence.py
@@ -23,6 +23,7 @@

 import gpflow
 from gpflow import Parameter
+from gpflow.keras import tf_keras
 from gpflow.models.model import RegressionData
 from gpflow.utilities import positive, to_default_float
@@ -92,7 +93,7 @@ def create_gpflux_sequential_and_loss(kernel, likelihood, inducing_variable, num
     loss = gpflux.losses.LikelihoodLoss(likelihood)
     likelihood_container = gpflux.layers.TrackableLayer()
     likelihood_container.likelihood = likelihood  # for likelihood to be discovered as trainable
-    model = tf.keras.Sequential([gp_layer, likelihood_container])
+    model = tf_keras.Sequential([gp_layer, likelihood_container])
     return model, loss
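
Editor's note: the change that repeats throughout this patch is the replacement of every `tf.keras` entry point with `tf_keras`, imported from `gpflow.keras`. The driver is TensorFlow 2.16, where `tf.keras` started pointing at Keras 3 while GPflux targets Keras 2, which now ships as the separate `tf-keras` package. A minimal sketch of what a compatibility alias of this kind can look like — an illustration of the idea only, under the stated assumption; the actual implementation is owned by GPflow and may differ:

    # Sketch of a Keras-2 compatibility alias in the spirit of gpflow.keras.
    # Assumption: on TF >= 2.16 (where tf.keras is Keras 3), Keras 2 comes from
    # the standalone `tf_keras` package; on older TF, tf.keras is still Keras 2.
    from packaging.version import Version

    import tensorflow as tf

    if Version(tf.__version__) >= Version("2.16"):
        import tf_keras
    else:
        tf_keras = tf.keras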
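With the alias in place, downstream code migrates mechanically: swap the import and keep every call site the same. A short usage sketch tying the pieces together — the toy data and hyperparameter values are made up for illustration, while `Config`, `build_constant_input_dim_deep_gp`, and `as_training_model` are the GPflux entry points already used in the notebooks patched above:

    import numpy as np

    from gpflow.keras import tf_keras
    from gpflux.architectures import Config, build_constant_input_dim_deep_gp

    # Toy 1D regression data, for illustration only.
    X = np.random.randn(100, 1)
    Y = np.sin(3 * X) + 0.1 * np.random.randn(100, 1)

    config = Config(
        num_inducing=20,
        inner_layer_qsqrt_factor=1e-5,
        likelihood_noise_variance=1e-2,
        whiten=True,
    )
    deep_gp = build_constant_input_dim_deep_gp(X, num_layers=2, config=config)

    # The training model expects named "inputs" and "targets", as set up in
    # DeepGP.__init__; every former tf.keras call site now goes through tf_keras.
    training_model: tf_keras.Model = deep_gp.as_training_model()
    training_model.compile(tf_keras.optimizers.Adam(learning_rate=0.01))
    training_model.fit(
        {"inputs": X, "targets": Y},
        epochs=10,
        callbacks=[tf_keras.callbacks.ReduceLROnPlateau("loss", factor=0.95, patience=3)],
    )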