From 543ba052d8cd0ff72b40aa4fec2523dffc9a6ec1 Mon Sep 17 00:00:00 2001 From: TensorFlow Hub Authors Date: Wed, 15 Nov 2023 07:57:50 -0800 Subject: [PATCH] Enforce using Keras 2 PiperOrigin-RevId: 582676829 --- tensorflow_hub/feature_column_test.py | 16 ++- tensorflow_hub/feature_column_v2_test.py | 44 ++++--- tensorflow_hub/keras_layer.py | 22 ++-- tensorflow_hub/keras_layer_test.py | 145 +++++++++++++---------- tensorflow_hub/native_module_test.py | 34 ++++-- tensorflow_hub/pip_package/setup.py | 1 + 6 files changed, 159 insertions(+), 103 deletions(-) diff --git a/tensorflow_hub/feature_column_test.py b/tensorflow_hub/feature_column_test.py index 762069499..19a581209 100644 --- a/tensorflow_hub/feature_column_test.py +++ b/tensorflow_hub/feature_column_test.py @@ -20,6 +20,14 @@ import tensorflow as tf import tensorflow_hub as hub +# pylint: disable=g-import-not-at-top +# Use Keras 2. +version_fn = getattr(tf.keras, "version", None) +if version_fn and version_fn().startswith("3."): + import tf_keras as keras +else: + keras = tf.keras + # pylint: disable=g-direct-tensorflow-import from tensorflow.python.feature_column import feature_column_v2 from tensorflow.python.ops.lookup_ops import HashTable @@ -130,7 +138,7 @@ def testDenseFeatures(self): with tf.Graph().as_default(): # We want to test with dense_features_v2.DenseFeatures. This symbol was # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a. - feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns) + feature_layer = keras.layers.DenseFeatures(feature_columns) feature_layer_out = feature_layer(features) with tf.compat.v1.train.MonitoredSession() as sess: output = sess.run(feature_layer_out) @@ -150,7 +158,7 @@ def testDenseFeatures_shareAcrossApplication(self): with tf.Graph().as_default(): # We want to test with dense_features_v2.DenseFeatures. This symbol was # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a. - feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns) + feature_layer = keras.layers.DenseFeatures(feature_columns) feature_layer_out_1 = feature_layer(features) feature_layer_out_2 = feature_layer(features) @@ -311,7 +319,7 @@ def testDenseFeatures(self): with tf.Graph().as_default(): # We want to test with dense_features_v2.DenseFeatures. This symbol was # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a. - feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns) + feature_layer = keras.layers.DenseFeatures(feature_columns) feature_layer_out = feature_layer(features) with tf.compat.v1.train.MonitoredSession() as sess: output = sess.run(feature_layer_out) @@ -333,7 +341,7 @@ def testDenseFeatures_shareAcrossApplication(self): with tf.Graph().as_default(): # We want to test with dense_features_v2.DenseFeatures. This symbol was # added in https://github.com/tensorflow/tensorflow/commit/64586f18724f737393071125a91b19adf013cf8a. 
- feature_layer = tf.compat.v2.keras.layers.DenseFeatures(feature_columns) + feature_layer = keras.layers.DenseFeatures(feature_columns) feature_layer_out_1 = feature_layer(features) feature_layer_out_2 = feature_layer(features) diff --git a/tensorflow_hub/feature_column_v2_test.py b/tensorflow_hub/feature_column_v2_test.py index fb3b9c0fa..cda239d39 100644 --- a/tensorflow_hub/feature_column_v2_test.py +++ b/tensorflow_hub/feature_column_v2_test.py @@ -20,6 +20,13 @@ import tensorflow.compat.v2 as tf import tensorflow_hub as hub +# pylint: disable=g-import-not-at-top +# Use Keras 2. +version_fn = getattr(tf.keras, "version", None) +if version_fn and version_fn().startswith("3."): + import tf_keras as keras +else: + keras = tf.keras # pylint: disable=g-direct-tensorflow-import from tensorflow.python.feature_column import feature_column_v2 @@ -100,7 +107,7 @@ def testDenseFeaturesDirectly(self): hub.text_embedding_column_v2("text_a", self.model, trainable=False), hub.text_embedding_column_v2("text_b", self.model, trainable=False), ] - feature_layer = tf.keras.layers.DenseFeatures(feature_columns) + feature_layer = keras.layers.DenseFeatures(feature_columns) feature_layer_out = feature_layer(features) self.assertAllEqual(feature_layer_out, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]]) @@ -114,12 +121,13 @@ def testDenseFeaturesInKeras(self): hub.text_embedding_column_v2("text", self.model, trainable=True), ] input_features = dict( - text=tf.keras.layers.Input(name="text", shape=[None], dtype=tf.string)) - dense_features = tf.keras.layers.DenseFeatures(feature_columns) + text=keras.layers.Input(name="text", shape=[None], dtype=tf.string) + ) + dense_features = keras.layers.DenseFeatures(feature_columns) x = dense_features(input_features) - x = tf.keras.layers.Dense(16, activation="relu")(x) - logits = tf.keras.layers.Dense(1, activation="linear")(x) - model = tf.keras.Model(inputs=input_features, outputs=logits) + x = keras.layers.Dense(16, activation="relu")(x) + logits = keras.layers.Dense(1, activation="linear")(x) + model = keras.Model(inputs=input_features, outputs=logits) model.compile( optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]) model.fit(x=features, y=label, epochs=10) @@ -135,13 +143,13 @@ def testLoadingDifferentFeatureColumnsFails(self): ] # Build the first model. 
input_features = dict( - text_1=tf.keras.layers.Input( - name="text_1", shape=[None], dtype=tf.string)) - dense_features = tf.keras.layers.DenseFeatures(feature_columns) + text_1=keras.layers.Input(name="text_1", shape=[None], dtype=tf.string) + ) + dense_features = keras.layers.DenseFeatures(feature_columns) x = dense_features(input_features) - x = tf.keras.layers.Dense(16, activation="relu")(x) - logits = tf.keras.layers.Dense(1, activation="linear")(x) - model_1 = tf.keras.Model(inputs=input_features, outputs=logits) + x = keras.layers.Dense(16, activation="relu")(x) + logits = keras.layers.Dense(1, activation="linear")(x) + model_1 = keras.Model(inputs=input_features, outputs=logits) model_1.compile( optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]) model_1.fit(x=features, y=label, epochs=10) @@ -155,13 +163,13 @@ def testLoadingDifferentFeatureColumnsFails(self): hub.text_embedding_column_v2("text_2", self.model, trainable=True), ] input_features = dict( - text_2=tf.keras.layers.Input( - name="text_2", shape=[None], dtype=tf.string)) - dense_features = tf.keras.layers.DenseFeatures(feature_columns) + text_2=keras.layers.Input(name="text_2", shape=[None], dtype=tf.string) + ) + dense_features = keras.layers.DenseFeatures(feature_columns) x = dense_features(input_features) - x = tf.keras.layers.Dense(16, activation="relu")(x) - logits = tf.keras.layers.Dense(1, activation="linear")(x) - model_2 = tf.keras.Model(inputs=input_features, outputs=logits) + x = keras.layers.Dense(16, activation="relu")(x) + logits = keras.layers.Dense(1, activation="linear")(x) + model_2 = keras.Model(inputs=input_features, outputs=logits) model_2.compile( optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"]) diff --git a/tensorflow_hub/keras_layer.py b/tensorflow_hub/keras_layer.py index dedeed0c5..61f276173 100644 --- a/tensorflow_hub/keras_layer.py +++ b/tensorflow_hub/keras_layer.py @@ -22,7 +22,15 @@ from tensorflow_hub import module_v2 -# pylint: disable=g-direct-tensorflow-import,g-import-not-at-top +# pylint: disable=g-import-not-at-top +# Use Keras 2. +version_fn = getattr(tf.keras, "version", None) +if version_fn and version_fn().startswith("3."): + import tf_keras as keras +else: + keras = tf.keras + +# pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import smart_cond from tensorflow.python.util import tf_inspect @@ -33,7 +41,7 @@ # pylint: enable=g-direct-tensorflow-import,g-import-not-at-top -class KerasLayer(tf.keras.layers.Layer): +class KerasLayer(keras.layers.Layer): """Wraps a SavedModel (or a legacy TF1 Hub format) as a Keras Layer. This layer wraps a callable object for use as a Keras layer. The callable @@ -51,7 +59,7 @@ class KerasLayer(tf.keras.layers.Layer): or a nest of tensors containing the inputs to the layer. If the callable accepts a `training` argument, a Python boolean is passed for it. It is True if this layer is marked trainable *and* called for training, analogous to - tf.keras.layers.BatchNormalization. (By contrast, tf.keras.layers.Dropout + keras.layers.BatchNormalization. (By contrast, keras.layers.Dropout ignores the trainable state and applies the training argument verbatim.) If present, the following attributes of callable are understood to have @@ -86,7 +94,7 @@ class KerasLayer(tf.keras.layers.Layer): `tf.estimator.RunConfig`. (This option was experimental from TF1.14 to TF2.1.) Note: The data types used by a saved model have been fixed at saving time. - Using tf.keras.mixed_precision etc. 
has no effect on the saved model + Using keras.mixed_precision etc. has no effect on the saved model that gets loaded by a hub.KerasLayer. Attributes: @@ -227,7 +235,7 @@ def call(self, inputs, training=None): f = functools.partial(self._callable, *args, **kwargs) # ...but we may also have to pass a Python boolean for `training`, which # is the logical "and" of this layer's trainability and what the surrounding - # model is doing (analogous to tf.keras.layers.BatchNormalization in TF2). + # model is doing (analogous to keras.layers.BatchNormalization in TF2). # For the latter, we have to look in two places: the `training` argument, # or else Keras' global `learning_phase`, which might actually be a tensor. if not self._has_training_argument: @@ -235,7 +243,7 @@ def call(self, inputs, training=None): else: if self.trainable: if training is None: - training = tf.keras.backend.learning_phase() + training = keras.backend.learning_phase() else: # Behave like BatchNormalization. (Dropout is different, b/181839368.) training = False @@ -383,7 +391,7 @@ def compute_output_shape(self, input_shape): """Computes the output shape of the layer. This relies on the `output_shape` provided during initialization, if any, - else falls back to the default behavior from `tf.keras.layers.Layer`. + else falls back to the default behavior from `keras.layers.Layer`. Args: input_shape: Shape tuple (tuple of integers) or list of shape tuples (one diff --git a/tensorflow_hub/keras_layer_test.py b/tensorflow_hub/keras_layer_test.py index b5cfbc1c1..f44e95664 100644 --- a/tensorflow_hub/keras_layer_test.py +++ b/tensorflow_hub/keras_layer_test.py @@ -22,6 +22,16 @@ import tensorflow as tf import tensorflow_hub as hub +# pylint: disable=g-import-not-at-top +# Use Keras 2. +version_fn = getattr(tf.keras, "version", None) +if version_fn and version_fn().startswith("3."): + import tf_keras # pylint: disable=unused-import + from tf_keras.api._v2 import keras as tf_keras_v2 +else: + tf_keras = tf.keras # Keras 2 + tf_keras_v2 = tf.compat.v2.keras + # NOTE: A Hub-style SavedModel can either be constructed manually, or by # relying on tf.saved_model.save(keras_model, ...) to put in the expected # endpoints. 
The following _save*model() helpers offer a save_from_keras
@@ -44,19 +54,21 @@ def _json_cycle(x):
 
 def _save_half_plus_one_model(export_dir, save_from_keras=False):
   """Writes Hub-style SavedModel to compute y = wx + 1, with w trainable."""
-  inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32)
-  times_w = tf.keras.layers.Dense(
+  inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32)
+  times_w = tf_keras_v2.layers.Dense(
       units=1,
-      kernel_initializer=tf.keras.initializers.Constant([[0.5]]),
-      kernel_regularizer=tf.keras.regularizers.l2(0.01),
-      use_bias=False)
-  plus_1 = tf.keras.layers.Dense(
+      kernel_initializer=tf_keras_v2.initializers.Constant([[0.5]]),
+      kernel_regularizer=tf_keras_v2.regularizers.l2(0.01),
+      use_bias=False,
+  )
+  plus_1 = tf_keras_v2.layers.Dense(
       units=1,
-      kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
-      bias_initializer=tf.keras.initializers.Constant([1.0]),
-      trainable=False)
+      kernel_initializer=tf_keras_v2.initializers.Constant([[1.0]]),
+      bias_initializer=tf_keras_v2.initializers.Constant([1.0]),
+      trainable=False,
+  )
   outp = plus_1(times_w(inp))
-  model = tf.keras.Model(inp, outp)
+  model = tf_keras_v2.Model(inp, outp)
 
   if save_from_keras:
     tf.saved_model.save(model, export_dir)
@@ -84,18 +96,20 @@ def _save_half_plus_one_hub_module_v1(path):
 
   def half_plus_one():
     x = tf.compat.v1.placeholder(shape=(None, 1), dtype=tf.float32)
     # Use TF1 native tf.compat.v1.layers instead of tf.keras.layers as they
     # correctly update TF collections, such as REGULARIZATION_LOSS.
     times_w = tf.compat.v1.layers.Dense(
         units=1,
-        kernel_initializer=tf.keras.initializers.Constant([[0.5]]),
-        kernel_regularizer=tf.keras.regularizers.l2(0.01),
-        use_bias=False)
+        kernel_initializer=tf_keras_v2.initializers.Constant([[0.5]]),
+        kernel_regularizer=tf_keras_v2.regularizers.l2(0.01),
+        use_bias=False,
+    )
     plus_1 = tf.compat.v1.layers.Dense(
         units=1,
-        kernel_initializer=tf.keras.initializers.Constant([[1.0]]),
-        bias_initializer=tf.keras.initializers.Constant([1.0]),
-        trainable=False)
+        kernel_initializer=tf_keras_v2.initializers.Constant([[1.0]]),
+        bias_initializer=tf_keras_v2.initializers.Constant([1.0]),
+        trainable=False,
+    )
     y = plus_1(times_w(x))
 
     hub.add_signature(inputs=x, outputs=y)
@@ -106,20 +120,21 @@ def half_plus_one():
 
 def _save_2d_text_embedding(export_dir, save_from_keras=False):
   """Writes SavedModel to compute y = length(text)*w, with w trainable."""
 
-  class StringLengthLayer(tf.keras.layers.Layer):
+  class StringLengthLayer(tf_keras_v2.layers.Layer):
 
     def call(self, inputs):
       return tf.strings.length(inputs)
 
-  inp = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
+  inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.string)
   text_length = StringLengthLayer()
-  times_w = tf.keras.layers.Dense(
+  times_w = tf_keras_v2.layers.Dense(
       units=2,
-      kernel_initializer=tf.keras.initializers.Constant([0.1, 0.3]),
-      kernel_regularizer=tf.keras.regularizers.l2(0.01),
-      use_bias=False)
+      kernel_initializer=tf_keras_v2.initializers.Constant([0.1, 0.3]),
+      kernel_regularizer=tf_keras_v2.regularizers.l2(0.01),
+      use_bias=False,
+  )
   outp = times_w(text_length(inp))
-  model = tf.keras.Model(inp, outp)
+  model = tf_keras_v2.Model(inp, outp)
 
   if save_from_keras:
     tf.saved_model.save(model, export_dir)
@@ -154,10 +169,10 @@ def _tensors_names_set(tensor_sequence):
 
 def _save_batch_norm_model(export_dir,
save_from_keras=False): """Writes a Hub-style SavedModel with a batch norm layer.""" - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) - bn = tf.keras.layers.BatchNormalization(momentum=0.8) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) + bn = tf_keras_v2.layers.BatchNormalization(momentum=0.8) outp = bn(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) if save_from_keras: tf.saved_model.save(model, export_dir) @@ -221,9 +236,9 @@ def _save_model_with_custom_attributes(export_dir, # Calling the module parses an integer. f = lambda a: tf.strings.to_number(a, tf.int64) if save_from_keras: - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.string) - outp = tf.keras.layers.Lambda(f)(inp) - model = tf.keras.Model(inp, outp) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.string) + outp = tf_keras_v2.layers.Lambda(f)(inp) + model = tf_keras_v2.Model(inp, outp) else: model = tf.train.Checkpoint() model.__call__ = tf.function( @@ -380,10 +395,10 @@ def testHalfPlusOneRetraining(self, model_format): _dispatch_model_format(model_format, _save_half_plus_one_model, _save_half_plus_one_hub_module_v1, export_dir) # Import the half-plus-one model into a consumer model. - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) imported = hub.KerasLayer(export_dir, trainable=True) outp = imported(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) # The consumer model computes y = x/2 + 1 as expected. self.assertAllEqual( model(np.array([[0.], [8.], [10.], [12.]], dtype=np.float32)), @@ -401,7 +416,10 @@ def testHalfPlusOneRetraining(self, model_format): # Retrain on y = x/2 + 6 for x near 10. # (Console output should show loss below 0.2.) model.compile( - tf.keras.optimizers.SGD(0.002), "mean_squared_error", run_eagerly=True) + tf_keras_v2.optimizers.SGD(0.002), + "mean_squared_error", + run_eagerly=True, + ) x = [[9.], [10.], [11.]] * 10 y = [[xi[0] / 2. + 6] for xi in x] model.fit(np.array(x), np.array(y), batch_size=len(x), epochs=10, verbose=2) @@ -425,10 +443,10 @@ def testRegularizationLoss(self, model_format): _dispatch_model_format(model_format, _save_half_plus_one_model, _save_half_plus_one_hub_module_v1, export_dir) # Import the half-plus-one model into a consumer model. - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) imported = hub.KerasLayer(export_dir, trainable=False) outp = imported(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) # When untrainable, the layer does not contribute regularization losses. self.assertAllEqual(model.losses, np.array([0.], dtype=np.float32)) # When trainable (even set after the fact), the layer forwards its losses. 
@@ -445,17 +463,18 @@ def testBatchNormRetraining(self, save_from_keras): """Tests imported batch norm with trainable=True.""" export_dir = os.path.join(self.get_temp_dir(), "batch-norm") _save_batch_norm_model(export_dir, save_from_keras=save_from_keras) - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) imported = hub.KerasLayer(export_dir, trainable=True) var_beta, var_gamma, var_mean, var_variance = _get_batch_norm_vars(imported) outp = imported(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) # Retrain the imported batch norm layer on a fixed batch of inputs, # which has mean 12.0 and some variance of a less obvious value. # The module learns scale and offset parameters that achieve the # mapping x --> 2*x for the observed mean and variance. model.compile( - tf.keras.optimizers.SGD(0.1), "mean_squared_error", run_eagerly=True) + tf_keras_v2.optimizers.SGD(0.1), "mean_squared_error", run_eagerly=True + ) x = [[11.], [12.], [13.]] y = [[2 * xi[0]] for xi in x] model.fit(np.array(x), np.array(y), batch_size=len(x), epochs=100) @@ -478,15 +497,16 @@ def testBatchNormFreezing(self, save_from_keras): """Tests imported batch norm with trainable=False.""" export_dir = os.path.join(self.get_temp_dir(), "batch-norm") _save_batch_norm_model(export_dir, save_from_keras=save_from_keras) - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) imported = hub.KerasLayer(export_dir, trainable=False) var_beta, var_gamma, var_mean, var_variance = _get_batch_norm_vars(imported) - dense = tf.keras.layers.Dense( + dense = tf_keras_v2.layers.Dense( units=1, - kernel_initializer=tf.keras.initializers.Constant([[1.5]]), - use_bias=False) + kernel_initializer=tf_keras_v2.initializers.Constant([[1.5]]), + use_bias=False, + ) outp = dense(imported(inp)) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) # Training the model to x --> 2*x leaves the batch norm layer entirely # unchanged (both trained beta&gamma and aggregated mean&variance). self.assertAllClose(var_beta.numpy(), np.array([0.0])) @@ -494,7 +514,8 @@ def testBatchNormFreezing(self, save_from_keras): self.assertAllClose(var_mean.numpy(), np.array([0.0])) self.assertAllClose(var_variance.numpy(), np.array([1.0])) model.compile( - tf.keras.optimizers.SGD(0.1), "mean_squared_error", run_eagerly=True) + tf_keras_v2.optimizers.SGD(0.1), "mean_squared_error", run_eagerly=True + ) x = [[1.], [2.], [3.]] y = [[2 * xi[0]] for xi in x] model.fit(np.array(x), np.array(y), batch_size=len(x), epochs=20) @@ -529,8 +550,8 @@ def testInputOutputDict(self, pass_output_shapes): export_dir = os.path.join(self.get_temp_dir(), "with-dicts") _save_model_with_dict_input_output(export_dir) # Build a Model from it using Keras' "functional" API. - x_in = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) - y_in = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + x_in = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) + y_in = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) dict_in = dict(x=x_in, y=y_in) kwargs = dict(arguments=dict(return_dict=True)) # For the SavedModel. 
if pass_output_shapes: @@ -542,8 +563,8 @@ def testInputOutputDict(self, pass_output_shapes): dict_out = imported(dict_in) delta_out = dict_out["delta"] sigma_out = dict_out["sigma"] - concat_out = tf.keras.layers.concatenate([delta_out, sigma_out]) - model = tf.keras.Model(dict_in, [delta_out, sigma_out, concat_out]) + concat_out = tf_keras_v2.layers.concatenate([delta_out, sigma_out]) + model = tf_keras_v2.Model(dict_in, [delta_out, sigma_out, concat_out]) # Test the model. x = np.array([[11.], [22.], [33.]], dtype=np.float32) y = np.array([[1.], [2.], [3.]], dtype=np.float32) @@ -571,10 +592,10 @@ def testOutputShapeList(self, pass_output_shapes): kwargs = {} if pass_output_shapes: kwargs["output_shape"] = [[1], [2, 2], [3, 3, 3]] - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.float32) + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.float32) imported = hub.KerasLayer(export_dir, **kwargs) outp = imported(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) x = np.array([[1.], [10.]], dtype=np.float32) outputs = model(x) @@ -637,21 +658,22 @@ def testResaveWithMixedPrecision(self, save_from_keras): # model is used in this test. _save_2d_text_embedding(export_dir1, save_from_keras=save_from_keras) try: - tf.compat.v2.keras.mixed_precision.set_global_policy("mixed_float16") - inp = tf.keras.layers.Input(shape=(1,), dtype=tf.string) + tf_keras_v2.mixed_precision.set_global_policy("mixed_float16") + inp = tf_keras_v2.layers.Input(shape=(1,), dtype=tf.string) imported = hub.KerasLayer(export_dir1, trainable=True) outp = imported(inp) - model = tf.keras.Model(inp, outp) + model = tf_keras_v2.Model(inp, outp) model.compile( - tf.keras.optimizers.SGD(0.002, momentum=0.001), + tf_keras_v2.optimizers.SGD(0.002, momentum=0.001), "mean_squared_error", - run_eagerly=True) + run_eagerly=True, + ) x = [["a"], ["aa"], ["aaa"]] y = [len(xi) for xi in x] model.fit(x, y) tf.saved_model.save(model, export_dir2) finally: - tf.compat.v2.keras.mixed_precision.set_global_policy("float32") + tf_keras_v2.mixed_precision.set_global_policy("float32") def testComputeOutputShapeNonEager(self): export_dir = os.path.join(self.get_temp_dir(), "half-plus-one") @@ -699,13 +721,14 @@ def testSaveModelConfig(self, save_from_keras): export_dir = os.path.join(self.get_temp_dir(), "half-plus-one") _save_half_plus_one_model(export_dir, save_from_keras=save_from_keras) - model = tf.keras.Sequential([hub.KerasLayer(export_dir)]) + model = tf_keras_v2.Sequential([hub.KerasLayer(export_dir)]) in_value = np.array([[10.]], dtype=np.float32) result = model(in_value).numpy() json_string = model.to_json() - new_model = tf.keras.models.model_from_json( - json_string, custom_objects={"KerasLayer": hub.KerasLayer}) + new_model = tf_keras_v2.models.model_from_json( + json_string, custom_objects={"KerasLayer": hub.KerasLayer} + ) new_result = new_model(in_value).numpy() self.assertEqual(result, new_result) diff --git a/tensorflow_hub/native_module_test.py b/tensorflow_hub/native_module_test.py index efe7a2abd..d4cdfc253 100644 --- a/tensorflow_hub/native_module_test.py +++ b/tensorflow_hub/native_module_test.py @@ -24,6 +24,16 @@ from tensorflow_hub import native_module from tensorflow_hub import tf_utils +# pylint: disable=g-import-not-at-top +# Use Keras 2. 
+version_fn = getattr(tf.keras, "version", None) +if version_fn and version_fn().startswith("3."): + import tf_keras # pylint: disable=unused-import + from tf_keras.api._v1 import keras as tf_keras_v1 # pylint: disable=unused-import +else: + tf_keras = tf.keras # Keras 2 + tf_keras_v1 = tf.compat.v1.keras + # pylint: disable=g-direct-tensorflow-import from tensorflow.python.framework import function from tensorflow.python.framework import test_util @@ -49,7 +59,9 @@ def multi_signature_module(): def batch_norm_module(training): x = tf.compat.v1.placeholder(tf.float32, shape=[None, 3]) - y = tf.compat.v1.layers.batch_normalization(x, training=training) + y = tf_keras_v1.__internal__.legacy.layers.batch_normalization( + x, training=training + ) native_module.add_signature(inputs=x, outputs=y) @@ -1051,11 +1063,9 @@ def l2(weights): with tf.control_dependencies([weights]): return 2.0 * tf.compat.v1.nn.l2_loss(weights) - h = tf.compat.v1.layers.dense( - x, 2, - activation=None, - kernel_regularizer=l2, - bias_regularizer=l2) + h = tf_keras_v1.__internal__.legacy.layers.dense( + x, 2, activation=None, kernel_regularizer=l2, bias_regularizer=l2 + ) hub.add_signature(inputs=x, outputs=h) @@ -1317,11 +1327,9 @@ def testUpdateOps(self): def batch_norm_module_fn(is_training): """Module that exercises batch normalization, incl. UPDATE_OPS.""" x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 1], name="x") - y = tf.compat.v1.layers.batch_normalization( - momentum=0.4, - inputs=x, - fused=False, - training=is_training) + y = tf_keras_v1.__internal__.legacy.layers.batch_normalization( + momentum=0.4, inputs=x, fused=False, training=is_training + ) hub.add_signature(inputs=x, outputs=y) @@ -1883,7 +1891,7 @@ def testRegisterLinkedOpsError(self): class TFHubExportSpecTest(tf.test.TestCase): def f(self, x, dim=10): - return tf.compat.v1.layers.dense(x, dim) + return tf_keras_v1.__internal__.legacy.layers.dense(x, dim) def module_fn(self, dim=10): x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, dim]) @@ -1900,7 +1908,7 @@ def createCheckpoint(self, scope=None): y = self.f(x) else: y = self.f(x) - tf.compat.v1.layers.dense(y, 20) + tf_keras_v1.__internal__.legacy.layers.dense(y, 20) saver = tf.compat.v1.train.Saver() init_op = tf.compat.v1.initializers.global_variables() diff --git a/tensorflow_hub/pip_package/setup.py b/tensorflow_hub/pip_package/setup.py index 859d05fe6..69a13bc9d 100644 --- a/tensorflow_hub/pip_package/setup.py +++ b/tensorflow_hub/pip_package/setup.py @@ -28,6 +28,7 @@ REQUIRED_PACKAGES = [ 'numpy >= 1.12.0', 'protobuf >= 3.19.6', # No less than what ../WORKSPACE uses. + 'tf-keras >= 2.14.1', ] project_name = 'tensorflow-hub'
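
Note on the pattern above: every file in this change gains the same Keras 2
compatibility shim instead of importing tf_keras unconditionally, so the code
keeps working on older TensorFlow releases where tf.keras is still Keras 2.
A minimal standalone sketch of the shim, plus composing hub.KerasLayer with a
Keras 2 model (the SavedModel path below is a hypothetical placeholder; the
usage mirrors testSaveModelConfig):

    import tensorflow as tf
    import tensorflow_hub as hub

    # The shim treats a tf.keras.version() starting with "3." as Keras 3.
    # In that case, fall back to the Keras 2 implementation shipped in the
    # separate tf-keras package (now a dependency in setup.py).
    version_fn = getattr(tf.keras, "version", None)
    if version_fn and version_fn().startswith("3."):
      import tf_keras as keras
    else:
      keras = tf.keras  # tf.keras is still Keras 2 here.

    # hub.KerasLayer now subclasses the Keras 2 Layer class, so it composes
    # with Keras 2 models (and not with Keras 3 ones).
    model = keras.Sequential([hub.KerasLayer("/tmp/half-plus-one")])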