From a9d5f978b353149a84a517c8811b7dbf9ae22557 Mon Sep 17 00:00:00 2001
From: Marina
Date: Wed, 2 Aug 2023 22:41:58 +0000
Subject: [PATCH] black formatting

---
 retvec/tf/layers/tokenizer.py             | 4 ++--
 retvec/tf/models/positional_embeddings.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/retvec/tf/layers/tokenizer.py b/retvec/tf/layers/tokenizer.py
index e48a1a1..6360576 100644
--- a/retvec/tf/layers/tokenizer.py
+++ b/retvec/tf/layers/tokenizer.py
@@ -57,7 +57,7 @@ class RETVecTokenizer(tf.keras.layers.Layer):
     def __init__(
         self,
         sequence_length: int = 128,
-        model: Optional[Union[str, Path]] = 'retvec-v1',
+        model: Optional[Union[str, Path]] = "retvec-v1",
         trainable: bool = False,
         sep: str = "",
         standardize: Optional[str] = None,
@@ -79,7 +79,7 @@ def __init__(
                 `sequence_length` words.
 
             model: Path to saved pretrained RETVec model, str or pathlib.Path
-                object. 'retvec-v1' to use V1 of the pre-trained RETVec word
+                object. "retvec-v1" to use V1 of the pre-trained RETVec word
                 embedding model, None to use the default RETVec character
                 encoding.
 
diff --git a/retvec/tf/models/positional_embeddings.py b/retvec/tf/models/positional_embeddings.py
index 58ab0f3..f455cf6 100644
--- a/retvec/tf/models/positional_embeddings.py
+++ b/retvec/tf/models/positional_embeddings.py
@@ -132,7 +132,7 @@ def __init__(
         self._hidden_size = hidden_size
         self._min_timescale = min_timescale
         self._max_timescale = max_timescale
-        self._init_scale = 1 / self._hidden_size**0.5
+        self._init_scale = 1 / self._hidden_size ** 0.5
 
         self._scale = self.add_weight(
             name="sin_scale",
@@ -217,7 +217,7 @@ def rope(x: Tensor, axis: Union[List[int], int]) -> Tensor:
 
     half_size = shape[-1] // 2
     freq_seq = tf.cast(tf.range(half_size), tf.float32) / float(half_size)
-    inv_freq = 10000**-freq_seq
+    inv_freq = 10000 ** -freq_seq
     sinusoid = tf.einsum("...,d->...d", position, inv_freq)
     sin = tf.cast(tf.sin(sinusoid), dtype=x.dtype)
    cos = tf.cast(tf.cos(sinusoid), dtype=x.dtype)
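
Reviewer note, not part of the patch: the quote change in the first hunk touches the
`model` default that callers pass to `RETVecTokenizer`. A minimal usage sketch, assuming
`RETVecTokenizer` is importable from `retvec.tf` and that the layer can be called eagerly
on a batch of raw strings (the string-tensor input follows from the layer's docstring):

    import tensorflow as tf

    from retvec.tf import RETVecTokenizer  # assumed public import path

    # "retvec-v1" (the default quoted above) loads the pretrained word
    # embedding model; model=None would use the RETVec character encoding.
    tokenizer = RETVecTokenizer(model="retvec-v1", sequence_length=128)

    # The layer maps raw strings straight to per-word embeddings.
    embeddings = tokenizer(tf.constant(["Testing 123", "hello world"]))
    print(embeddings.shape)  # (2, sequence_length, embedding_size)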
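
On the `rope()` hunk: the spacing around `**` is purely cosmetic (both spellings parse to
the same expression), and the reformatted line is the standard rotary-embedding frequency
schedule, inv_freq[i] = 10000 ** -(i / half_size). A small standalone check of that line,
using a hypothetical half_size of 4:

    import tensorflow as tf

    # Same computation as the patched line in rope(), for half_size = 4.
    half_size = 4
    freq_seq = tf.cast(tf.range(half_size), tf.float32) / float(half_size)
    inv_freq = 10000 ** -freq_seq  # identical in value to 10000**-freq_seq
    print(inv_freq.numpy())  # approximately [1.0, 0.1, 0.01, 0.001]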