From 10138bec854e295910c5377d368028f2e50c9f33 Mon Sep 17 00:00:00 2001
From: jfrery
Date: Fri, 15 Nov 2024 13:38:37 +0100
Subject: [PATCH] chore: fix tests and refresh LoraMLP notebook

---
 docs/advanced_examples/LoraMLP.ipynb    | 88 +++++++++++++-------------
 src/concrete/ml/torch/hybrid_model.py   | 17 ++---
 tests/torch/test_hybrid_converter.py    |  2 +-
 use_case_examples/deployment/README.md  | 16 ++---
 4 files changed, 62 insertions(+), 61 deletions(-)

diff --git a/docs/advanced_examples/LoraMLP.ipynb b/docs/advanced_examples/LoraMLP.ipynb
index af17b90fc..7b6dc6e7c 100644
--- a/docs/advanced_examples/LoraMLP.ipynb
+++ b/docs/advanced_examples/LoraMLP.ipynb
@@ -21,7 +21,7 @@
     {
      "data": {
       "text/plain": [
-       ""
+       ""
       ]
      },
      "execution_count": 1,
@@ -324,7 +324,7 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "\r\n",
+     "\r",
      "Training: 0%| | 0/10 [00:00
diff --git a/src/concrete/ml/torch/hybrid_model.py b/src/concrete/ml/torch/hybrid_model.py
--- a/src/concrete/ml/torch/hybrid_model.py
+++ b/src/concrete/ml/torch/hybrid_model.py
@@ ... @@ def forward(self, x: torch.Tensor, fhe: str = "disable") -> torch.Tensor:
         # Validate the FHE mode
         fhe_mode = HybridFHEMode(fhe)

-        if _HAS_GLWE_BACKEND and self._has_large_linear_layers:
+        if _HAS_GLWE_BACKEND and self._has_only_large_linear_layers:
             if fhe_mode == HybridFHEMode.SIMULATE:
                 raise AssertionError(
                     "When the HybridFHEModel is instantiated with only "
@@ -468,9 +468,10 @@ def forward(self, x: torch.Tensor, fhe: str = "disable") -> torch.Tensor:
             # Initialize executor only if not already done
             if self.executor is None:
                 self.executor = GLWELinearLayerExecutor()
-                # Generate keys only if needed and not already done
-                if fhe_mode != HybridFHEMode.DISABLE:
-                    self.executor.keygen()
+
+            # Generate keys only if needed and not already done
+            if fhe_mode != HybridFHEMode.DISABLE and self.executor.private_key is None:
+                self.executor.keygen()

             # Update executor for all remote modules
             for module in self.remote_modules.values():
@@ -580,7 +581,7 @@ def compile_model(
             # If all layers are linear and the GLWE backend is available
             # then simply quantize the model without compiling with
             # Concrete Python.
-            if self._has_large_linear_layers and _HAS_GLWE_BACKEND:
+            if self._has_only_large_linear_layers and _HAS_GLWE_BACKEND:
                 self.private_q_modules[name] = build_quantized_module(
                     self.private_modules[name],
                     calibration_data_tensor,
diff --git a/tests/torch/test_hybrid_converter.py b/tests/torch/test_hybrid_converter.py
index 1f25ac499..de5724c6d 100644
--- a/tests/torch/test_hybrid_converter.py
+++ b/tests/torch/test_hybrid_converter.py
@@ -328,7 +328,7 @@ def prepare_data(x, y, test_size=0.1, random_state=42):
     # were linear and were replaced with the GLWE backend
     # Check if GLWE optimization should be used based on input dimension
     should_use_glwe = n_hidden >= 512
-    is_pure_linear = hybrid_local._has_large_linear_layers  # pylint: disable=protected-access
+    is_pure_linear = hybrid_local._has_only_large_linear_layers  # pylint: disable=protected-access
     assert is_pure_linear == should_use_glwe

     hybrid_local.compile_model(x1_train, n_bits=10)
diff --git a/use_case_examples/deployment/README.md b/use_case_examples/deployment/README.md
index 38baac25b..fed7b713e 100644
--- a/use_case_examples/deployment/README.md
+++ b/use_case_examples/deployment/README.md
@@ -7,26 +7,26 @@ This folder contains examples of how to deploy Concrete ML models using Fully Ho
 The deployment process generally follows these steps:

 1. Train the model (optional, depending on the use case)
-2. Compile the model to an FHE circuit
-3. Deploy the model using Docker
-4. Run inference using a client (locally or in Docker)
+1. Compile the model to an FHE circuit
+1. Deploy the model using Docker
+1. Run inference using a client (locally or in Docker)

 ## Available Examples

 We provide three different use cases to demonstrate the deployment process:

 1. [Breast Cancer Classification](./breast_cancer/README.md)
-2. [Sentiment Analysis](./sentiment_analysis/README.md)
-3. [CIFAR-10 Image Classification](./cifar/README.md)
+1. [Sentiment Analysis](./sentiment_analysis/README.md)
+1. [CIFAR-10 Image Classification](./cifar/README.md)

 ## Getting Started

 Each example folder contains its own README with specific instructions. However, the general process is similar:

 1. Train or compile the model using the provided scripts
-2. Deploy the model using `deploy_to_docker.py` from the `server` folder
-3. Build the client Docker image
-4. Run the client to interact with the deployed model
+1. Deploy the model using `deploy_to_docker.py` from the `server` folder
+1. Build the client Docker image
+1. Run the client to interact with the deployed model

 For detailed instructions, please refer to the README in each example folder.
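The `hybrid_model.py` change above makes key generation lazy: the GLWE executor is created once, and `keygen()` runs only when FHE is actually enabled and no private key has been generated yet. A minimal, self-contained sketch of that pattern is shown below; the `FHEMode`, `Executor`, and `Model` names are illustrative stand-ins, not the actual Concrete ML classes.

```python
from enum import Enum
from typing import Optional


class FHEMode(Enum):
    """Stand-in for HybridFHEMode: how the forward pass should run."""

    DISABLE = "disable"
    EXECUTE = "execute"


class Executor:
    """Stand-in for the GLWE linear-layer executor used in the patch."""

    def __init__(self) -> None:
        self.private_key: Optional[bytes] = None

    def keygen(self) -> None:
        # Placeholder key material; the real executor generates GLWE keys.
        self.private_key = b"generated-key"


class Model:
    def __init__(self) -> None:
        self.executor: Optional[Executor] = None

    def forward(self, fhe: str = "disable") -> None:
        fhe_mode = FHEMode(fhe)

        # Initialize the executor only if not already done
        if self.executor is None:
            self.executor = Executor()

        # Generate keys only if needed and not already done
        if fhe_mode != FHEMode.DISABLE and self.executor.private_key is None:
            self.executor.keygen()


model = Model()
model.forward(fhe="execute")  # keys generated on the first FHE call
model.forward(fhe="execute")  # reuses the existing key, no second keygen
model.forward(fhe="disable")  # never triggers key generation
```

Moving the `private_key is None` check out of the executor-creation branch is what prevents repeated key generation when `forward` is called more than once with FHE enabled.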