diff --git a/benchmarks/classification.py b/benchmarks/classification.py index 50e46ef39..c4dc82ed7 100644 --- a/benchmarks/classification.py +++ b/benchmarks/classification.py @@ -109,7 +109,7 @@ def main(): # Listing if args.long_list or args.short_list: already_done_models = {} - for (dataset_i, model_class_i, config_i) in all_classification_tasks: + for dataset_i, model_class_i, config_i in all_classification_tasks: config_n = json.dumps(config_i).replace("'", '"') model_name_i = model_class_i.__name__ diff --git a/benchmarks/common.py b/benchmarks/common.py index c06a9e171..a3445f679 100644 --- a/benchmarks/common.py +++ b/benchmarks/common.py @@ -274,7 +274,7 @@ def run_and_report_classification_metrics( (f1_score, "f1", "F1Score"), ] - for (metric, metric_id, metric_label) in metric_info: + for metric, metric_id, metric_label in metric_info: run_and_report_metric( y_gt, y_pred, @@ -288,7 +288,7 @@ def run_and_report_regression_metrics(y_gt, y_pred, metric_id_prefix, metric_lab """Run several metrics and report results to progress tracker with computed name and id""" metric_info = [(r2_score, "r2_score", "R2Score"), (mean_squared_error, "MSE", "MSE")] - for (metric, metric_id, metric_label) in metric_info: + for metric, metric_id, metric_label in metric_info: run_and_report_metric( y_gt, y_pred, @@ -768,6 +768,7 @@ def benchmark_name_generator( # - The functions support all models # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/1866 + # pylint: disable-next=too-many-branches, redefined-outer-name def benchmark_name_to_config( benchmark_name: str, joiner: str = "_" diff --git a/benchmarks/deep_learning.py b/benchmarks/deep_learning.py index a140638c5..dd8c81aae 100644 --- a/benchmarks/deep_learning.py +++ b/benchmarks/deep_learning.py @@ -886,7 +886,7 @@ def main(): if args.long_list or args.short_list: # Print the short or long lists if asked and stop printed_models = set() - for (dataset, cnn_class, config) in all_tasks: + for dataset, 
cnn_class, config in all_tasks: configs = json.dumps(config).replace("'", '"') cnn_name = cnn_class.__name__ diff --git a/benchmarks/glm.py b/benchmarks/glm.py index d2ae5bafc..b3b2302e2 100644 --- a/benchmarks/glm.py +++ b/benchmarks/glm.py @@ -113,14 +113,20 @@ def get_preprocessor() -> ColumnTransformer: def get_train_test_data(data: pandas.DataFrame) -> Tuple[pandas.DataFrame, pandas.DataFrame]: """Split the data into a train and test set.""" - train_data, test_data, = train_test_split( + ( + train_data, + test_data, + ) = train_test_split( data, test_size=0.2, random_state=0, ) # The test set is reduced for faster FHE runs. - _, test_data, = train_test_split( + ( + _, + test_data, + ) = train_test_split( test_data, test_size=500, random_state=0, diff --git a/benchmarks/regression.py b/benchmarks/regression.py index d5e699b74..d7229c468 100644 --- a/benchmarks/regression.py +++ b/benchmarks/regression.py @@ -109,7 +109,7 @@ def main(): # Listing if args.long_list or args.short_list: already_done_models = {} - for (dataset_i, model_class_i, config_i) in all_tasks: + for dataset_i, model_class_i, config_i in all_tasks: config_n = json.dumps(config_i).replace("'", '"') model_name_i = model_class_i.__name__ diff --git a/conftest.py b/conftest.py index 1606c8ecb..6326ddd1a 100644 --- a/conftest.py +++ b/conftest.py @@ -1,4 +1,5 @@ """PyTest configuration file.""" + import hashlib import json import random diff --git a/deps_licenses/licenses_mac_silicon_user.txt b/deps_licenses/licenses_mac_silicon_user.txt index 1def4a274..d0f5eec75 100644 --- a/deps_licenses/licenses_mac_silicon_user.txt +++ b/deps_licenses/licenses_mac_silicon_user.txt @@ -31,7 +31,7 @@ jsonpickle, 3.0.4, BSD License mpmath, 1.3.0, BSD License networkx, 3.1, BSD License numpy, 1.23.5, BSD License -onnx, 1.16.0, Apache License v2.0 +onnx, 1.15.0, Apache License v2.0 onnxconverter-common, 1.13.0, MIT License onnxmltools, 1.11.0, Apache Software License onnxoptimizer, 0.3.13, Apache License v2.0 diff 
--git a/deps_licenses/licenses_mac_silicon_user.txt.md5 b/deps_licenses/licenses_mac_silicon_user.txt.md5 index 71e76d1fb..5ec742f86 100644 --- a/deps_licenses/licenses_mac_silicon_user.txt.md5 +++ b/deps_licenses/licenses_mac_silicon_user.txt.md5 @@ -1 +1 @@ -6a81dc377a7c438d85a5f2200ecc6a76 +180ea2b3232d89fc71be20d1707620b4 diff --git a/docker/release_resources/sanity_check.py b/docker/release_resources/sanity_check.py index 4daab741d..221ef1fc6 100644 --- a/docker/release_resources/sanity_check.py +++ b/docker/release_resources/sanity_check.py @@ -1,4 +1,5 @@ """Sanity checks, to be sure that our package is usable""" + import argparse import random import shutil diff --git a/docs/advanced_examples/KNearestNeighbors.ipynb b/docs/advanced_examples/KNearestNeighbors.ipynb index 8ce433c35..6f6e6ed20 100644 --- a/docs/advanced_examples/KNearestNeighbors.ipynb +++ b/docs/advanced_examples/KNearestNeighbors.ipynb @@ -392,9 +392,11 @@ "def highlight_diff(row):\n", " \"\"\"Custom style function to highlight mismatched predictions.\"\"\"\n", " return [\n", - " \"background-color: yellow\"\n", - " if row[\"Majority vote (Concrete ML)\"] != row[\"Majority vote (scikit-learn)\"]\n", - " else \"\"\n", + " (\n", + " \"background-color: yellow\"\n", + " if row[\"Majority vote (Concrete ML)\"] != row[\"Majority vote (scikit-learn)\"]\n", + " else \"\"\n", + " )\n", " ] * len(row)\n", "\n", "\n", diff --git a/poetry.lock b/poetry.lock index e0eb876a5..dd89d298a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -379,18 +379,18 @@ files = [ [[package]] name = "backports-tarfile" -version = "1.0.0" +version = "1.1.0" description = "Backport of CPython tarfile module" optional = false python-versions = ">=3.8" files = [ - {file = "backports.tarfile-1.0.0-py3-none-any.whl", hash = "sha256:bcd36290d9684beb524d3fe74f4a2db056824c47746583f090b8e55daf0776e4"}, - {file = "backports.tarfile-1.0.0.tar.gz", hash = "sha256:2688f159c21afd56a07b75f01306f9f52c79aebcc5f4a117fb8fbb4445352c75"}, + {file 
= "backports.tarfile-1.1.0-py3-none-any.whl", hash = "sha256:b2f4df351db942d094db94588bbf2c6938697a5f190f44c934acc697da56008b"}, + {file = "backports_tarfile-1.1.0.tar.gz", hash = "sha256:91d59138ea401ee2a95e8b839c1e2f51f3e9ca76bdba8b6a29f8d773564686a8"}, ] [package.extras] docs = ["furo", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"] +testing = ["jaraco.test", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"] [[package]] name = "beautifulsoup4" @@ -3887,46 +3887,40 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "onnx" -version = "1.16.0" +version = "1.15.0" description = "Open Neural Network Exchange" optional = false python-versions = ">=3.8" files = [ - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9eadbdce25b19d6216f426d6d99b8bc877a65ed92cbef9707751c6669190ba4f"}, - {file = "onnx-1.16.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:034ae21a2aaa2e9c14119a840d2926d213c27aad29e5e3edaa30145a745048e1"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec22a43d74eb1f2303373e2fbe7fbcaa45fb225f4eb146edfed1356ada7a9aea"}, - {file = "onnx-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:298f28a2b5ac09145fa958513d3d1e6b349ccf86a877dbdcccad57713fe360b3"}, - {file = "onnx-1.16.0-cp310-cp310-win32.whl", hash = "sha256:66300197b52beca08bc6262d43c103289c5d45fde43fb51922ed1eb83658cf0c"}, - {file = "onnx-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:ae0029f5e47bf70a1a62e7f88c80bca4ef39b844a89910039184221775df5e43"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:f51179d4af3372b4f3800c558d204b592c61e4b4a18b8f61e0eea7f46211221a"}, - {file = "onnx-1.16.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = 
"sha256:5202559070afec5144332db216c20f2fff8323cf7f6512b0ca11b215eacc5bf3"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77579e7c15b4df39d29465b216639a5f9b74026bdd9e4b6306cd19a32dcfe67c"}, - {file = "onnx-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e60ca76ac24b65c25860d0f2d2cdd96d6320d062a01dd8ce87c5743603789b8"}, - {file = "onnx-1.16.0-cp311-cp311-win32.whl", hash = "sha256:81b4ee01bc554e8a2b11ac6439882508a5377a1c6b452acd69a1eebb83571117"}, - {file = "onnx-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:7449241e70b847b9c3eb8dae622df8c1b456d11032a9d7e26e0ee8a698d5bf86"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:03a627488b1a9975d95d6a55582af3e14c7f3bb87444725b999935ddd271d352"}, - {file = "onnx-1.16.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c392faeabd9283ee344ccb4b067d1fea9dfc614fa1f0de7c47589efd79e15e78"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0efeb46985de08f0efe758cb54ad3457e821a05c2eaf5ba2ccb8cd1602c08084"}, - {file = "onnx-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddf14a3d32234f23e44abb73a755cb96a423fac7f004e8f046f36b10214151ee"}, - {file = "onnx-1.16.0-cp312-cp312-win32.whl", hash = "sha256:62a2e27ae8ba5fc9b4a2620301446a517b5ffaaf8566611de7a7c2160f5bcf4c"}, - {file = "onnx-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:3e0860fea94efde777e81a6f68f65761ed5e5f3adea2e050d7fbe373a9ae05b3"}, - {file = "onnx-1.16.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:70a90649318f3470985439ea078277c9fb2a2e6e2fd7c8f3f2b279402ad6c7e6"}, - {file = "onnx-1.16.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:71839546b7f93be4fa807995b182ab4b4414c9dbf049fee11eaaced16fcf8df2"}, - {file = "onnx-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7665217c45a61eb44718c8e9349d2ad004efa0cb9fbc4be5c6d5e18b9fe12b52"}, - {file = "onnx-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5752bbbd5717304a7643643dba383a2fb31e8eb0682f4e7b7d141206328a73b"}, - {file = "onnx-1.16.0-cp38-cp38-win32.whl", hash = "sha256:257858cbcb2055284f09fa2ae2b1cfd64f5850367da388d6e7e7b05920a40c90"}, - {file = "onnx-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:209fe84995a28038e29ae8369edd35f33e0ef1ebc3bddbf6584629823469deb1"}, - {file = "onnx-1.16.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:8cf3e518b1b1b960be542e7c62bed4e5219e04c85d540817b7027029537dec92"}, - {file = "onnx-1.16.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:30f02beaf081c7d9fa3a8c566a912fc4408e28fc33b1452d58f890851691d364"}, - {file = "onnx-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb29a9a692b522deef1f6b8f2145da62c0c43ea1ed5b4c0f66f827fdc28847d"}, - {file = "onnx-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7755cbd5f4e47952e37276ea5978a46fc8346684392315902b5ed4a719d87d06"}, - {file = "onnx-1.16.0-cp39-cp39-win32.whl", hash = "sha256:7532343dc5b8b5e7c3e3efa441a3100552f7600155c4db9120acd7574f64ffbf"}, - {file = "onnx-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:d7886c05aa6d583ec42f6287678923c1e343afc4350e49d5b36a0023772ffa22"}, - {file = "onnx-1.16.0.tar.gz", hash = "sha256:237c6987c6c59d9f44b6136f5819af79574f8d96a760a1fa843bede11f3822f7"}, -] - -[package.dependencies] -numpy = ">=1.20" + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:51cacb6aafba308aaf462252ced562111f6991cdc7bc57a6c554c3519453a8ff"}, + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:0aee26b6f7f7da7e840de75ad9195a77a147d0662c94eaa6483be13ba468ffc1"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:baf6ef6c93b3b843edb97a8d5b3d229a1301984f3f8dee859c29634d2083e6f9"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ed899fe6000edc05bb2828863d3841cfddd5a7cf04c1a771f112e94de75d9f"}, + {file = "onnx-1.15.0-cp310-cp310-win32.whl", hash = "sha256:f1ad3d77fc2f4b4296f0ac2c8cadd8c1dcf765fc586b737462d3a0fe8f7c696a"}, + {file = "onnx-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca4ebc4f47109bfb12c8c9e83dd99ec5c9f07d2e5f05976356c6ccdce3552010"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:233ffdb5ca8cc2d960b10965a763910c0830b64b450376da59207f454701f343"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:51fa79c9ea9af033638ec51f9177b8e76c55fad65bb83ea96ee88fafade18ee7"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f277d4861729f5253a51fa41ce91bfec1c4574ee41b5637056b43500917295ce"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8a7c94d2ebead8f739fdb70d1ce5a71726f4e17b3e5b8ad64455ea1b2801a85"}, + {file = "onnx-1.15.0-cp311-cp311-win32.whl", hash = "sha256:17dcfb86a8c6bdc3971443c29b023dd9c90ff1d15d8baecee0747a6b7f74e650"}, + {file = "onnx-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:60a3e28747e305cd2e766e6a53a0a6d952cf9e72005ec6023ce5e07666676a4e"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6b5c798d9e0907eaf319e3d3e7c89a2ed9a854bcb83da5fefb6d4c12d5e90721"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a4f774ff50092fe19bd8f46b2c9b27b1d30fbd700c22abde48a478142d464322"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2b0e7f3938f2d994c34616bfb8b4b1cebbc4a0398483344fe5e9f2fe95175e6"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:49cebebd0020a4b12c1dd0909d426631212ef28606d7e4d49463d36abe7639ad"}, + {file = "onnx-1.15.0-cp38-cp38-win32.whl", hash = "sha256:1fdf8a3ff75abc2b32c83bf27fb7c18d6b976c9c537263fadd82b9560fe186fa"}, + {file = "onnx-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:763e55c26e8de3a2dce008d55ae81b27fa8fb4acbb01a29b9f3c01f200c4d676"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:b2d5e802837629fc9c86f19448d19dd04d206578328bce202aeb3d4bedab43c4"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9a9cfbb5e5d5d88f89d0dfc9df5fb858899db874e1d5ed21e76c481f3cafc90d"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f472bbe5cb670a0a4a4db08f41fde69b187a009d0cb628f964840d3f83524e9"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf2de9bef64792e5b8080c678023ac7d2b9e05d79a3e17e92cf6a4a624831d2"}, + {file = "onnx-1.15.0-cp39-cp39-win32.whl", hash = "sha256:ef4d9eb44b111e69e4534f3233fc2c13d1e26920d24ae4359d513bd54694bc6d"}, + {file = "onnx-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d7a3e2d79d371e272e39ae3f7547e0b116d0c7f774a4004e97febe6c93507f"}, + {file = "onnx-1.15.0.tar.gz", hash = "sha256:b18461a7d38f286618ca2a6e78062a2a9c634ce498e631e708a8041b00094825"}, +] + +[package.dependencies] +numpy = "*" protobuf = ">=3.20.2" [package.extras] @@ -7613,4 +7607,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<3.11" -content-hash = "4cfb9366319177b7a2c4f692b6d9ac4570d798f935e56ae8f0b3a56f2ecc218e" +content-hash = "0bdd58e03389819d8a074956b4a14f1e2c22541697d64c17276c432313295cc5" diff --git a/pyproject.toml b/pyproject.toml index 638718fbd..ee555f863 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,11 +42,13 @@ skorch = "0.11.0" torch = "1.13.1" typing-extensions = "4.5.0" brevitas = "0.8.0" +onnx = "1.15.0" 
onnxoptimizer = "0.3.13" +# onnxruntime versions supported by onnx versions and opsets can be found here: +# https://onnxruntime.ai/docs/reference/compatibility.html#onnx-opset-support onnxruntime = "1.17.3" hummingbird-ml = {version="0.4.8", extras = ["onnx"]} scikit-learn = "1.1.3" -onnx = "1.16.0" scipy = "1.10.1" numpy = "1.23.5" protobuf = "3.20.3" diff --git a/script/actions_utils/escape_quotes.py b/script/actions_utils/escape_quotes.py index e039675e2..aab5e6cb5 100644 --- a/script/actions_utils/escape_quotes.py +++ b/script/actions_utils/escape_quotes.py @@ -1,4 +1,5 @@ """Script to escape double quotes within brackets or curly braces.""" + import argparse parser = argparse.ArgumentParser(description="Escape double quotes in a string") diff --git a/script/actions_utils/generate_scripts_benchmark.py b/script/actions_utils/generate_scripts_benchmark.py index aa650e73b..b9fc97a1b 100644 --- a/script/actions_utils/generate_scripts_benchmark.py +++ b/script/actions_utils/generate_scripts_benchmark.py @@ -1,4 +1,5 @@ """Script to generate the list of commands to run all benchmarks""" + import argparse import datetime import json diff --git a/script/actions_utils/json_length.py b/script/actions_utils/json_length.py index 3a8ba7f7f..e9998a499 100644 --- a/script/actions_utils/json_length.py +++ b/script/actions_utils/json_length.py @@ -1,4 +1,5 @@ """Script to evaluate the length of a json file""" + import argparse import json from pathlib import Path diff --git a/script/actions_utils/monitor.py b/script/actions_utils/monitor.py index 37f8765e1..df6e1b8e7 100644 --- a/script/actions_utils/monitor.py +++ b/script/actions_utils/monitor.py @@ -1,4 +1,5 @@ """Module to generate figure of evolution of Concrete ML-CI time on main for last 4 weeks.""" + import argparse import datetime import json diff --git a/script/actions_utils/pytest_failed_test_report.py b/script/actions_utils/pytest_failed_test_report.py index 7483e3902..db24dc052 100755 --- 
a/script/actions_utils/pytest_failed_test_report.py +++ b/script/actions_utils/pytest_failed_test_report.py @@ -1,4 +1,5 @@ """Pytest JSON report on failed tests utils.""" + import argparse import json from pathlib import Path diff --git a/script/actions_utils/refresh_notebooks_list.py b/script/actions_utils/refresh_notebooks_list.py index 068e10828..0ad74f75c 100644 --- a/script/actions_utils/refresh_notebooks_list.py +++ b/script/actions_utils/refresh_notebooks_list.py @@ -1,4 +1,5 @@ """Update the list of available notebooks for the refresh_one_notebook GitHib action.""" + import argparse from pathlib import Path diff --git a/script/actions_utils/run_commands.py b/script/actions_utils/run_commands.py index e70545d0e..561f134eb 100644 --- a/script/actions_utils/run_commands.py +++ b/script/actions_utils/run_commands.py @@ -1,4 +1,5 @@ """Script to run commands from a json file""" + import argparse import json import subprocess diff --git a/script/doc_utils/gen_supported_ops.py b/script/doc_utils/gen_supported_ops.py index df854dd8b..da93556e0 100644 --- a/script/doc_utils/gen_supported_ops.py +++ b/script/doc_utils/gen_supported_ops.py @@ -1,4 +1,5 @@ """Update list of supported functions in the doc.""" + import argparse from pathlib import Path diff --git a/script/make_utils/actionlint_check_with_whitelists.py b/script/make_utils/actionlint_check_with_whitelists.py index 88fe5c65c..e7d3be9be 100644 --- a/script/make_utils/actionlint_check_with_whitelists.py +++ b/script/make_utils/actionlint_check_with_whitelists.py @@ -1,4 +1,5 @@ """ Check an actionlint log against some whitelists """ + import sys from typing import Set diff --git a/script/make_utils/check_headers.py b/script/make_utils/check_headers.py index a8edfa628..2cb6f7cba 100644 --- a/script/make_utils/check_headers.py +++ b/script/make_utils/check_headers.py @@ -1,4 +1,5 @@ """Check that headers linked do indeed exist in target markdown files""" + import os from pathlib import Path diff --git 
a/script/make_utils/check_issues.py b/script/make_utils/check_issues.py index 137a5d445..33e5e1d97 100644 --- a/script/make_utils/check_issues.py +++ b/script/make_utils/check_issues.py @@ -1,4 +1,5 @@ """Check linked github issues states""" + import json import re import subprocess diff --git a/script/nbmake_utils/notebook_finalize.py b/script/nbmake_utils/notebook_finalize.py index 8f9275811..250e08c4c 100644 --- a/script/nbmake_utils/notebook_finalize.py +++ b/script/nbmake_utils/notebook_finalize.py @@ -1,4 +1,5 @@ """Finalize Jupyter notebooks.""" + import argparse import json from pathlib import Path diff --git a/src/concrete/__init__.py b/src/concrete/__init__.py index 18d9e7759..2a31e077a 100644 --- a/src/concrete/__init__.py +++ b/src/concrete/__init__.py @@ -1,4 +1,5 @@ """Top level import.""" + # Do not modify, this is to have a compatible namespace package # https://packaging.python.org/en/latest/guides/packaging-namespace-packages/ # #pkg-resources-style-namespace-packages diff --git a/src/concrete/ml/common/__init__.py b/src/concrete/ml/common/__init__.py index 2abb9511b..a6471e854 100644 --- a/src/concrete/ml/common/__init__.py +++ b/src/concrete/ml/common/__init__.py @@ -1,2 +1,3 @@ """Module for shared data structures and code.""" + from . 
import check_inputs, debugging, utils diff --git a/src/concrete/ml/common/debugging/__init__.py b/src/concrete/ml/common/debugging/__init__.py index 39f44a259..d05956f03 100644 --- a/src/concrete/ml/common/debugging/__init__.py +++ b/src/concrete/ml/common/debugging/__init__.py @@ -1,2 +1,3 @@ """Module for debugging.""" + from .custom_assert import assert_false, assert_not_reached, assert_true diff --git a/src/concrete/ml/common/debugging/custom_assert.py b/src/concrete/ml/common/debugging/custom_assert.py index dfb62a4c6..d66e46cd7 100644 --- a/src/concrete/ml/common/debugging/custom_assert.py +++ b/src/concrete/ml/common/debugging/custom_assert.py @@ -1,4 +1,5 @@ """Provide some variants of assert.""" + from typing import Type diff --git a/src/concrete/ml/common/serialization/__init__.py b/src/concrete/ml/common/serialization/__init__.py index ee71e9136..7deb2e158 100644 --- a/src/concrete/ml/common/serialization/__init__.py +++ b/src/concrete/ml/common/serialization/__init__.py @@ -1,4 +1,5 @@ """Serialization module.""" + import os from torch.nn.modules import activation diff --git a/src/concrete/ml/common/serialization/decoder.py b/src/concrete/ml/common/serialization/decoder.py index c02995e02..f4ca08311 100644 --- a/src/concrete/ml/common/serialization/decoder.py +++ b/src/concrete/ml/common/serialization/decoder.py @@ -1,4 +1,5 @@ """Custom decoder for serialization.""" + import inspect import json from typing import Any, Dict, Type diff --git a/src/concrete/ml/common/serialization/dumpers.py b/src/concrete/ml/common/serialization/dumpers.py index f5c53d143..0c8b8f21a 100644 --- a/src/concrete/ml/common/serialization/dumpers.py +++ b/src/concrete/ml/common/serialization/dumpers.py @@ -1,4 +1,5 @@ """Dump functions for serialization.""" + import json from typing import Any, TextIO diff --git a/src/concrete/ml/common/serialization/encoder.py b/src/concrete/ml/common/serialization/encoder.py index bb8f396d2..b84d35987 100644 --- 
a/src/concrete/ml/common/serialization/encoder.py +++ b/src/concrete/ml/common/serialization/encoder.py @@ -1,4 +1,5 @@ """Custom encoder for serialization.""" + import inspect import json from json.encoder import _make_iterencode # type: ignore[attr-defined] diff --git a/src/concrete/ml/common/serialization/loaders.py b/src/concrete/ml/common/serialization/loaders.py index 224f73eab..d312dcf4f 100644 --- a/src/concrete/ml/common/serialization/loaders.py +++ b/src/concrete/ml/common/serialization/loaders.py @@ -1,4 +1,5 @@ """Load functions for serialization.""" + import json from typing import IO, Any, Union diff --git a/src/concrete/ml/deployment/__init__.py b/src/concrete/ml/deployment/__init__.py index 9aaa966e8..0099031fb 100644 --- a/src/concrete/ml/deployment/__init__.py +++ b/src/concrete/ml/deployment/__init__.py @@ -1,2 +1,3 @@ """Module for deployment of the FHE model.""" + from .fhe_client_server import FHEModelClient, FHEModelDev, FHEModelServer diff --git a/src/concrete/ml/deployment/utils.py b/src/concrete/ml/deployment/utils.py index 5829b34cb..30d241029 100644 --- a/src/concrete/ml/deployment/utils.py +++ b/src/concrete/ml/deployment/utils.py @@ -3,6 +3,7 @@ - Check if connection possible - Wait for connection to be available (with timeout) """ + import subprocess import time from pathlib import Path diff --git a/src/concrete/ml/onnx/onnx_utils.py b/src/concrete/ml/onnx/onnx_utils.py index 60a8cb02c..81091cd86 100644 --- a/src/concrete/ml/onnx/onnx_utils.py +++ b/src/concrete/ml/onnx/onnx_utils.py @@ -1,4 +1,5 @@ """Utils to interpret an ONNX model with numpy.""" + # Utils to interpret an ONNX model with numpy. 
diff --git a/src/concrete/ml/pandas/__init__.py b/src/concrete/ml/pandas/__init__.py index f54414f3f..179f03991 100644 --- a/src/concrete/ml/pandas/__init__.py +++ b/src/concrete/ml/pandas/__init__.py @@ -1,4 +1,5 @@ """Public API for encrypted data-frames.""" + from pathlib import Path from typing import Hashable, Optional, Sequence, Tuple, Union diff --git a/src/concrete/ml/pandas/_development.py b/src/concrete/ml/pandas/_development.py index 01e5f755d..ab5e3801f 100644 --- a/src/concrete/ml/pandas/_development.py +++ b/src/concrete/ml/pandas/_development.py @@ -1,4 +1,5 @@ """Define development methods for generating client/server files.""" + import itertools from functools import partial from pathlib import Path diff --git a/src/concrete/ml/pandas/_operators.py b/src/concrete/ml/pandas/_operators.py index 4e8db613a..432bcf491 100644 --- a/src/concrete/ml/pandas/_operators.py +++ b/src/concrete/ml/pandas/_operators.py @@ -1,4 +1,5 @@ """Implement Pandas operators in FHE using encrypted data-frames.""" + from typing import Any, Dict, Hashable, List, Optional, Sequence, Tuple, Union import numpy diff --git a/src/concrete/ml/pandas/_processing.py b/src/concrete/ml/pandas/_processing.py index 056b3c966..f77a97952 100644 --- a/src/concrete/ml/pandas/_processing.py +++ b/src/concrete/ml/pandas/_processing.py @@ -1,4 +1,5 @@ """Define pre-processing and post-processing steps for encrypted data-frames.""" + import copy from collections import defaultdict from typing import Dict, List, Tuple diff --git a/src/concrete/ml/pandas/_utils.py b/src/concrete/ml/pandas/_utils.py index 2c7a6b7f6..388c9fc60 100644 --- a/src/concrete/ml/pandas/_utils.py +++ b/src/concrete/ml/pandas/_utils.py @@ -1,4 +1,5 @@ """Define utility functions for encrypted data-frames.""" + import functools from typing import List, Optional, Tuple, Union diff --git a/src/concrete/ml/pandas/client_engine.py b/src/concrete/ml/pandas/client_engine.py index 37e625119..98fd5cbbe 100644 --- 
a/src/concrete/ml/pandas/client_engine.py +++ b/src/concrete/ml/pandas/client_engine.py @@ -1,4 +1,5 @@ """Define the framework used for managing keys (encrypt, decrypt) for encrypted data-frames.""" + from pathlib import Path from typing import Optional, Union diff --git a/src/concrete/ml/pandas/dataframe.py b/src/concrete/ml/pandas/dataframe.py index d4716702d..18746836b 100644 --- a/src/concrete/ml/pandas/dataframe.py +++ b/src/concrete/ml/pandas/dataframe.py @@ -1,4 +1,5 @@ """Define the encrypted data-frame framework.""" + import json from pathlib import Path from typing import Dict, Hashable, List, Optional, Sequence, Tuple, Union diff --git a/src/concrete/ml/pytest/__init__.py b/src/concrete/ml/pytest/__init__.py index 577895a2b..76047a2ce 100644 --- a/src/concrete/ml/pytest/__init__.py +++ b/src/concrete/ml/pytest/__init__.py @@ -1,2 +1,3 @@ """Module which is used to contain common functions for pytest.""" + from . import torch_models, utils diff --git a/src/concrete/ml/pytest/torch_models.py b/src/concrete/ml/pytest/torch_models.py index d2b41e4d9..cb87f4bf6 100644 --- a/src/concrete/ml/pytest/torch_models.py +++ b/src/concrete/ml/pytest/torch_models.py @@ -867,9 +867,7 @@ def __init__(self, input_output, activation_function, n_bits=2, disable_bit_chec n_bits_weights = n_bits # Generate the pattern 0, 1, ..., 2^N-1, 0, 1, .. 2^N-1, 0, 1.. 
- all_weights = numpy.mod( - numpy.arange(numpy.prod(self.fc1.weight.shape)), 2**n_bits_weights - ) + all_weights = numpy.mod(numpy.arange(numpy.prod(self.fc1.weight.shape)), 2**n_bits_weights) # Shuffle the pattern and reshape to weight shape numpy.random.shuffle(all_weights) diff --git a/src/concrete/ml/pytest/utils.py b/src/concrete/ml/pytest/utils.py index 6ab70488e..f97a34719 100644 --- a/src/concrete/ml/pytest/utils.py +++ b/src/concrete/ml/pytest/utils.py @@ -1,4 +1,5 @@ """Common functions or lists for test files, which can't be put in fixtures.""" + import copy import io from functools import partial diff --git a/src/concrete/ml/quantization/__init__.py b/src/concrete/ml/quantization/__init__.py index f9c94793e..b248e4db4 100644 --- a/src/concrete/ml/quantization/__init__.py +++ b/src/concrete/ml/quantization/__init__.py @@ -1,4 +1,5 @@ """Modules for quantization.""" + from .base_quantized_op import QuantizedOp from .post_training import ( PostTrainingAffineQuantization, diff --git a/src/concrete/ml/quantization/base_quantized_op.py b/src/concrete/ml/quantization/base_quantized_op.py index 31c3df90b..903fc4767 100644 --- a/src/concrete/ml/quantization/base_quantized_op.py +++ b/src/concrete/ml/quantization/base_quantized_op.py @@ -237,14 +237,14 @@ def dump_dict(self) -> Dict: metadata["_input_idx_to_params_name"] = self._input_idx_to_params_name metadata["_params_that_are_onnx_inputs"] = self._params_that_are_onnx_inputs metadata["_params_that_are_onnx_var_inputs"] = self._params_that_are_onnx_var_inputs - metadata[ - "_params_that_are_required_onnx_inputs" - ] = self._params_that_are_required_onnx_inputs + metadata["_params_that_are_required_onnx_inputs"] = ( + self._params_that_are_required_onnx_inputs + ) metadata["_has_attr"] = self._has_attr metadata["_inputs_not_quantized"] = self._inputs_not_quantized - metadata[ - "quantize_inputs_with_model_outputs_precision" - ] = self.quantize_inputs_with_model_outputs_precision + 
metadata["quantize_inputs_with_model_outputs_precision"] = ( + self.quantize_inputs_with_model_outputs_precision + ) metadata["produces_graph_output"] = self.produces_graph_output metadata["produces_raw_output"] = self.produces_raw_output metadata["error_tracker"] = self.error_tracker diff --git a/src/concrete/ml/quantization/qat_quantizers.py b/src/concrete/ml/quantization/qat_quantizers.py index 36a916546..a3705966c 100644 --- a/src/concrete/ml/quantization/qat_quantizers.py +++ b/src/concrete/ml/quantization/qat_quantizers.py @@ -1,4 +1,5 @@ """Custom Quantization Aware Training Brevitas quantizers.""" + from brevitas.quant.scaled_int import ( IntQuant, MaxStatsScaling, diff --git a/src/concrete/ml/quantization/quantized_module.py b/src/concrete/ml/quantization/quantized_module.py index 07635c60f..3b61ac816 100644 --- a/src/concrete/ml/quantization/quantized_module.py +++ b/src/concrete/ml/quantization/quantized_module.py @@ -1,4 +1,5 @@ """QuantizedModule API.""" + import copy import re from functools import partial @@ -145,7 +146,7 @@ def set_reduce_sum_copy(self): to copy the inputs with a PBS to avoid it. 
""" assert self.quant_layers_dict is not None - for (_, quantized_op) in self.quant_layers_dict.values(): + for _, quantized_op in self.quant_layers_dict.values(): if isinstance(quantized_op, QuantizedReduceSum): quantized_op.copy_inputs = True @@ -369,10 +370,10 @@ def forward( debug_value_tracker: Dict[ str, Dict[Union[int, str], Optional[ONNXOpInputOutputType]] ] = {} - for (_, layer) in self.quant_layers_dict.values(): + for _, layer in self.quant_layers_dict.values(): layer.debug_value_tracker = debug_value_tracker q_y_pred = self.quantized_forward(*q_x, fhe="disable") - for (_, layer) in self.quant_layers_dict.values(): + for _, layer in self.quant_layers_dict.values(): layer.debug_value_tracker = None # De-quantize the output predicted values y_pred = self.dequantize_output(*to_tuple(q_y_pred)) @@ -767,7 +768,7 @@ def bitwidth_and_range_report( return None op_names_to_report: Dict[str, Dict[str, Union[Tuple[int, ...], int]]] = {} - for (_, op_inst) in self.quant_layers_dict.values(): + for _, op_inst in self.quant_layers_dict.values(): # Get the value range of this tag and all its subtags # The potential tags for this op start with the op instance name # and are, sometimes, followed by a subtag starting with a period: diff --git a/src/concrete/ml/quantization/quantized_module_passes.py b/src/concrete/ml/quantization/quantized_module_passes.py index c4e6bfb95..4d5da2ab2 100644 --- a/src/concrete/ml/quantization/quantized_module_passes.py +++ b/src/concrete/ml/quantization/quantized_module_passes.py @@ -1,4 +1,5 @@ """Optimization passes for QuantizedModules.""" + from collections import defaultdict from typing import DefaultDict, Dict, List, Optional, Tuple @@ -101,7 +102,7 @@ def compute_op_predecessors(self) -> PredecessorsType: # Initialize the list of predecessors with tensors that are graph inputs predecessors: PredecessorsType = defaultdict(list) - for (node_inputs, node_op) in self._qmodule.quant_layers_dict.values(): + for node_inputs, node_op in 
self._qmodule.quant_layers_dict.values(): # The first input node contains the encrypted data enc_input_node = node_inputs[0] @@ -168,7 +169,7 @@ def detect_patterns(self, predecessors: PredecessorsType) -> PatternDict: valid_paths: PatternDict = {} # pylint: disable-next=too-many-nested-blocks - for (_, node_op) in self._qmodule.quant_layers_dict.values(): + for _, node_op in self._qmodule.quant_layers_dict.values(): # Only work with supported nodes that have a single # encrypted input (not supporting enc x enc matmul) if ( diff --git a/src/concrete/ml/quantization/quantizers.py b/src/concrete/ml/quantization/quantizers.py index 6e194200b..280764543 100644 --- a/src/concrete/ml/quantization/quantizers.py +++ b/src/concrete/ml/quantization/quantizers.py @@ -1,4 +1,5 @@ """Quantization utilities for a numpy array/tensor.""" + # pylint: disable=too-many-lines from __future__ import annotations diff --git a/src/concrete/ml/search_parameters/__init__.py b/src/concrete/ml/search_parameters/__init__.py index 4ecfbdcec..2ed29ce06 100644 --- a/src/concrete/ml/search_parameters/__init__.py +++ b/src/concrete/ml/search_parameters/__init__.py @@ -1,2 +1,3 @@ """Modules for `p_error` search.""" + from .p_error_search import BinarySearch diff --git a/src/concrete/ml/search_parameters/p_error_search.py b/src/concrete/ml/search_parameters/p_error_search.py index d0e9ce4ed..148413533 100644 --- a/src/concrete/ml/search_parameters/p_error_search.py +++ b/src/concrete/ml/search_parameters/p_error_search.py @@ -50,6 +50,7 @@ If we don't reach the convergence, a user warning is raised. 
""" + import warnings from collections import OrderedDict from pathlib import Path @@ -478,9 +479,11 @@ def run( self.history.append( OrderedDict( { - k: sum((d[k] for d in simulation_data), []) - if isinstance(simulation_data[0][k], list) - else simulation_data[0][k] + k: ( + sum((d[k] for d in simulation_data), []) + if isinstance(simulation_data[0][k], list) + else simulation_data[0][k] + ) for k in simulation_data[0] } ) diff --git a/src/concrete/ml/sklearn/__init__.py b/src/concrete/ml/sklearn/__init__.py index 3144d8d43..83acbf91c 100644 --- a/src/concrete/ml/sklearn/__init__.py +++ b/src/concrete/ml/sklearn/__init__.py @@ -1,4 +1,5 @@ """Import sklearn models.""" + from typing import Dict, List, Optional, Union from ..common.debugging.custom_assert import assert_true diff --git a/src/concrete/ml/sklearn/base.py b/src/concrete/ml/sklearn/base.py index d13da2394..cf8a8e3e1 100644 --- a/src/concrete/ml/sklearn/base.py +++ b/src/concrete/ml/sklearn/base.py @@ -1,4 +1,5 @@ """Base classes for all estimators.""" + from __future__ import annotations import copy diff --git a/src/concrete/ml/sklearn/glm.py b/src/concrete/ml/sklearn/glm.py index f82d5f257..1d8233093 100644 --- a/src/concrete/ml/sklearn/glm.py +++ b/src/concrete/ml/sklearn/glm.py @@ -1,4 +1,5 @@ """Implement sklearn's Generalized Linear Models (GLM).""" + from __future__ import annotations from abc import abstractmethod diff --git a/src/concrete/ml/sklearn/linear_model.py b/src/concrete/ml/sklearn/linear_model.py index afd02991d..45218849c 100644 --- a/src/concrete/ml/sklearn/linear_model.py +++ b/src/concrete/ml/sklearn/linear_model.py @@ -1,4 +1,5 @@ """Implement sklearn linear model.""" + import itertools import time import warnings diff --git a/src/concrete/ml/sklearn/neighbors.py b/src/concrete/ml/sklearn/neighbors.py index e4101eeef..727529419 100644 --- a/src/concrete/ml/sklearn/neighbors.py +++ b/src/concrete/ml/sklearn/neighbors.py @@ -1,4 +1,5 @@ """Implement sklearn neighbors model.""" + 
from typing import Any, Dict, Union import numpy diff --git a/src/concrete/ml/sklearn/qnn_module.py b/src/concrete/ml/sklearn/qnn_module.py index 6c63cb92c..cadc9e542 100644 --- a/src/concrete/ml/sklearn/qnn_module.py +++ b/src/concrete/ml/sklearn/qnn_module.py @@ -1,4 +1,5 @@ """Sparse Quantized Neural Network torch module.""" + from typing import Set, Type import brevitas.nn as qnn @@ -112,9 +113,9 @@ def __init__( weight_narrow_range=quant_narrow, narrow_range=quant_narrow, signed=quant_signed, - weight_quant=Int8WeightPerTensorPoT - if power_of_two_scaling - else Int8WeightPerTensorFloat, + weight_quant=( + Int8WeightPerTensorPoT if power_of_two_scaling else Int8WeightPerTensorFloat + ), ) self.features.add_module(quant_name, quantizer) diff --git a/src/concrete/ml/sklearn/rf.py b/src/concrete/ml/sklearn/rf.py index f4521bf06..e9c119c19 100644 --- a/src/concrete/ml/sklearn/rf.py +++ b/src/concrete/ml/sklearn/rf.py @@ -1,4 +1,5 @@ """Implement RandomForest models.""" + from typing import Any, Dict, Union import numpy diff --git a/src/concrete/ml/sklearn/svm.py b/src/concrete/ml/sklearn/svm.py index 1636a0061..509500b3f 100644 --- a/src/concrete/ml/sklearn/svm.py +++ b/src/concrete/ml/sklearn/svm.py @@ -1,4 +1,5 @@ """Implement Support Vector Machine.""" + from typing import Any, Dict import sklearn.svm diff --git a/src/concrete/ml/sklearn/tree.py b/src/concrete/ml/sklearn/tree.py index b496d4e47..42965cdce 100644 --- a/src/concrete/ml/sklearn/tree.py +++ b/src/concrete/ml/sklearn/tree.py @@ -1,4 +1,5 @@ """Implement DecisionTree models.""" + from typing import Any, Dict, Union import numpy diff --git a/src/concrete/ml/sklearn/tree_to_numpy.py b/src/concrete/ml/sklearn/tree_to_numpy.py index b50944319..14f4ab732 100644 --- a/src/concrete/ml/sklearn/tree_to_numpy.py +++ b/src/concrete/ml/sklearn/tree_to_numpy.py @@ -1,4 +1,5 @@ """Implements the conversion of a tree model to a numpy function.""" + import math import warnings from typing import Callable, List, 
Optional, Tuple diff --git a/src/concrete/ml/sklearn/xgb.py b/src/concrete/ml/sklearn/xgb.py index 8f3925fd7..b1c2f403e 100644 --- a/src/concrete/ml/sklearn/xgb.py +++ b/src/concrete/ml/sklearn/xgb.py @@ -1,4 +1,5 @@ """Implements XGBoost models.""" + import platform import warnings from typing import Any, Dict, List, Optional, Tuple, Union diff --git a/src/concrete/ml/torch/__init__.py b/src/concrete/ml/torch/__init__.py index 1071631be..0f1620f65 100644 --- a/src/concrete/ml/torch/__init__.py +++ b/src/concrete/ml/torch/__init__.py @@ -1,2 +1,3 @@ """Modules for torch to numpy conversion.""" + from .numpy_module import NumpyModule diff --git a/src/concrete/ml/torch/numpy_module.py b/src/concrete/ml/torch/numpy_module.py index 0387140bf..ca94dab12 100644 --- a/src/concrete/ml/torch/numpy_module.py +++ b/src/concrete/ml/torch/numpy_module.py @@ -1,4 +1,5 @@ """A torch to numpy module.""" + from pathlib import Path from typing import Optional, Tuple, Union diff --git a/src/concrete/ml/version.py b/src/concrete/ml/version.py index 2829fbfe5..97a575f9e 100644 --- a/src/concrete/ml/version.py +++ b/src/concrete/ml/version.py @@ -1,3 +1,4 @@ """File to manage the version of the package.""" + # Auto-generated by "make set_version" do not modify __version__ = "1.5.0" diff --git a/tests/common/test_custom_assert.py b/tests/common/test_custom_assert.py index 32ddfa245..9b356427e 100644 --- a/tests/common/test_custom_assert.py +++ b/tests/common/test_custom_assert.py @@ -1,4 +1,5 @@ """Test custom assert functions.""" + import pytest from concrete.ml.common.debugging.custom_assert import assert_false, assert_not_reached, assert_true diff --git a/tests/common/test_pbs_error_probability_settings.py b/tests/common/test_pbs_error_probability_settings.py index 1eb874800..85e4128bd 100644 --- a/tests/common/test_pbs_error_probability_settings.py +++ b/tests/common/test_pbs_error_probability_settings.py @@ -1,4 +1,5 @@ """Tests for the sklearn linear models.""" + import warnings 
from inspect import signature diff --git a/tests/common/test_serialization.py b/tests/common/test_serialization.py index a38fb80a4..a39adfc03 100644 --- a/tests/common/test_serialization.py +++ b/tests/common/test_serialization.py @@ -3,6 +3,7 @@ Here we test the custom dump(s)/load(s) functions for all supported objects. We also check that serializing unsupported object types properly throws an error. """ + import inspect import io import warnings diff --git a/tests/common/test_similarity_score.py b/tests/common/test_similarity_score.py index 9b5559458..efc82e313 100644 --- a/tests/common/test_similarity_score.py +++ b/tests/common/test_similarity_score.py @@ -1,4 +1,5 @@ """Tests for the r2 score test """ + import numpy import pytest diff --git a/tests/common/test_skearn_model_lists.py b/tests/common/test_skearn_model_lists.py index 306210982..7edaf77d0 100644 --- a/tests/common/test_skearn_model_lists.py +++ b/tests/common/test_skearn_model_lists.py @@ -1,4 +1,5 @@ """Tests lists of models in Concrete ML.""" + from concrete.ml.pytest.utils import MODELS_AND_DATASETS, UNIQUE_MODELS_AND_DATASETS from concrete.ml.sklearn import ( _get_sklearn_all_models, diff --git a/tests/common/test_utils.py b/tests/common/test_utils.py index f94838f4e..6c0573938 100644 --- a/tests/common/test_utils.py +++ b/tests/common/test_utils.py @@ -1,4 +1,5 @@ """Test utils functions.""" + import numpy import pandas import pytest diff --git a/tests/common/test_version.py b/tests/common/test_version.py index c2d57bc98..b036d07ad 100644 --- a/tests/common/test_version.py +++ b/tests/common/test_version.py @@ -1,4 +1,5 @@ """Testing the version of the package""" + import concrete from concrete import ml diff --git a/tests/deployment/test_deployment.py b/tests/deployment/test_deployment.py index 3567363fc..3e215e1e5 100644 --- a/tests/deployment/test_deployment.py +++ b/tests/deployment/test_deployment.py @@ -1,4 +1,5 @@ """Test deployment.""" + import io import time import uuid diff --git 
a/tests/onnx/test_onnx_ops_impl.py b/tests/onnx/test_onnx_ops_impl.py index fcb029d1d..42922d9c2 100644 --- a/tests/onnx/test_onnx_ops_impl.py +++ b/tests/onnx/test_onnx_ops_impl.py @@ -1,4 +1,5 @@ """Test custom assert functions.""" + import numpy import pytest diff --git a/tests/pandas/test_pandas.py b/tests/pandas/test_pandas.py index 805359e84..8a5084135 100644 --- a/tests/pandas/test_pandas.py +++ b/tests/pandas/test_pandas.py @@ -1,4 +1,5 @@ """Tests the encrypted data-frame API abd its coherence with Pandas""" + import re import shutil import tempfile diff --git a/tests/pytest/test_pytest_utils.py b/tests/pytest/test_pytest_utils.py index df4a6ccf4..503dad5c4 100644 --- a/tests/pytest/test_pytest_utils.py +++ b/tests/pytest/test_pytest_utils.py @@ -1,4 +1,5 @@ """Test pytest utility functions.""" + import numpy import pytest from numpy.random import RandomState diff --git a/tests/quantization/test_compilation.py b/tests/quantization/test_compilation.py index e482a57b3..46e8569b6 100644 --- a/tests/quantization/test_compilation.py +++ b/tests/quantization/test_compilation.py @@ -1,4 +1,5 @@ """Test Neural Networks compilations""" + import numpy import onnx import pytest diff --git a/tests/quantization/test_quantized_module.py b/tests/quantization/test_quantized_module.py index 75f65ca2a..0599dcad0 100644 --- a/tests/quantization/test_quantized_module.py +++ b/tests/quantization/test_quantized_module.py @@ -1,4 +1,5 @@ """Tests for the quantized module.""" + from functools import partial import numpy diff --git a/tests/quantization/test_quantizers.py b/tests/quantization/test_quantizers.py index c682c0e84..abafc4437 100644 --- a/tests/quantization/test_quantizers.py +++ b/tests/quantization/test_quantizers.py @@ -1,4 +1,5 @@ """Tests for the quantized array/tensors.""" + import numpy import pytest diff --git a/tests/seeding/test_seeding.py b/tests/seeding/test_seeding.py index 57fd4ad52..49450b276 100644 --- a/tests/seeding/test_seeding.py +++ 
b/tests/seeding/test_seeding.py @@ -1,4 +1,5 @@ """Tests for the torch to numpy module.""" + import inspect import random import warnings diff --git a/tests/seeding/test_seeding_system_file_a.py b/tests/seeding/test_seeding_system_file_a.py index 8bbac1bab..feeffedee 100644 --- a/tests/seeding/test_seeding_system_file_a.py +++ b/tests/seeding/test_seeding_system_file_a.py @@ -1,4 +1,5 @@ """Tests for the torch to numpy module.""" + import random import numpy diff --git a/tests/seeding/test_seeding_system_file_b.py b/tests/seeding/test_seeding_system_file_b.py index 8bbac1bab..feeffedee 100644 --- a/tests/seeding/test_seeding_system_file_b.py +++ b/tests/seeding/test_seeding_system_file_b.py @@ -1,4 +1,5 @@ """Tests for the torch to numpy module.""" + import random import numpy diff --git a/tests/sklearn/test_common.py b/tests/sklearn/test_common.py index 7b70e9cba..4f7889878 100644 --- a/tests/sklearn/test_common.py +++ b/tests/sklearn/test_common.py @@ -1,4 +1,5 @@ """Tests common to all sklearn models.""" + import inspect import warnings diff --git a/tests/sklearn/test_fhe_training.py b/tests/sklearn/test_fhe_training.py index eee4c9281..5bab26079 100644 --- a/tests/sklearn/test_fhe_training.py +++ b/tests/sklearn/test_fhe_training.py @@ -1,4 +1,5 @@ """Tests training in FHE.""" + import re import warnings diff --git a/tests/sklearn/test_qnn.py b/tests/sklearn/test_qnn.py index 89f190184..e0123c06f 100644 --- a/tests/sklearn/test_qnn.py +++ b/tests/sklearn/test_qnn.py @@ -1,4 +1,5 @@ """Tests for the FHE sklearn compatible NNs.""" + from copy import deepcopy from itertools import product @@ -566,7 +567,7 @@ def test_power_of_two_scaling( # Count the number of patterns that were optimized with roundPBS num_round_pbs_layers = 0 - for (_, node_op) in model.quantized_module_.quant_layers_dict.values(): + for _, node_op in model.quantized_module_.quant_layers_dict.values(): if isinstance(node_op, QuantizedMixingOp): num_round_pbs_layers += 1 if 
node_op.rounding_threshold_bits is not None else 0 lsbs_to_remove = ( @@ -616,7 +617,7 @@ def test_power_of_two_scaling( # Remove rounding in the network to perform inference without the optimization. # We expect a network that was optimized with the power-of-two adapter # to be exactly correct to the non-optimized one - for (_, node_op) in model.quantized_module_.quant_layers_dict.values(): + for _, node_op in model.quantized_module_.quant_layers_dict.values(): if isinstance(node_op, QuantizedMixingOp): node_op.rounding_threshold_bits = None node_op.lsbs_to_remove = None diff --git a/tests/torch/test_brevitas_qat.py b/tests/torch/test_brevitas_qat.py index cf6f70c7e..1c24d4ec5 100644 --- a/tests/torch/test_brevitas_qat.py +++ b/tests/torch/test_brevitas_qat.py @@ -532,7 +532,7 @@ def test_brevitas_power_of_two( pot_should_be_applied = not manual_rounding and power_of_two # Count the number of patterns that were optimized with roundPBS num_round_pbs_layers = 0 - for (_, node_op) in quantized_module.quant_layers_dict.values(): + for _, node_op in quantized_module.quant_layers_dict.values(): if isinstance(node_op, QuantizedMixingOp): num_round_pbs_layers += 1 if node_op.rounding_threshold_bits is not None else 0 if pot_should_be_applied: @@ -590,7 +590,7 @@ def test_brevitas_power_of_two( # Remove rounding in the network to perform inference without the optimization. 
# We expect a network that was optimized with the power-of-two adapter # to be exactly correct to the non-optimized one - for (_, node_op) in quantized_module.quant_layers_dict.values(): + for _, node_op in quantized_module.quant_layers_dict.values(): if isinstance(node_op, QuantizedMixingOp): node_op.rounding_threshold_bits = None node_op.lsbs_to_remove = None diff --git a/tests/torch/test_compile_keras.py b/tests/torch/test_compile_keras.py index 8bf4b065b..257d4c11d 100644 --- a/tests/torch/test_compile_keras.py +++ b/tests/torch/test_compile_keras.py @@ -1,4 +1,5 @@ """Tests for Keras models (by conversion to ONNX).""" + import tempfile import warnings from pathlib import Path diff --git a/tests/torch/test_compile_torch.py b/tests/torch/test_compile_torch.py index 8a66434f1..16770c88c 100644 --- a/tests/torch/test_compile_torch.py +++ b/tests/torch/test_compile_torch.py @@ -1,4 +1,5 @@ """Tests for the torch to numpy module.""" + # pylint: disable=too-many-lines import io import tempfile diff --git a/tests/torch/test_torch_to_numpy.py b/tests/torch/test_torch_to_numpy.py index ab33153b9..1b121d1b0 100644 --- a/tests/torch/test_torch_to_numpy.py +++ b/tests/torch/test_torch_to_numpy.py @@ -1,4 +1,5 @@ """Tests for the torch to numpy module.""" + from functools import partial import numpy diff --git a/tests/torch/test_training.py b/tests/torch/test_training.py index d0ff304ee..2c01f2c21 100644 --- a/tests/torch/test_training.py +++ b/tests/torch/test_training.py @@ -1,4 +1,5 @@ """Tests for FHE training.""" + import numpy import torch from sklearn import datasets diff --git a/tests/virtual_lib/test_virtual_lib.py b/tests/virtual_lib/test_virtual_lib.py index 47d73ebb5..7fad61e5e 100644 --- a/tests/virtual_lib/test_virtual_lib.py +++ b/tests/virtual_lib/test_virtual_lib.py @@ -1,4 +1,5 @@ """Test file for FHE simulation specific tests.""" + import numpy from concrete.fhe.compilation.circuit import Circuit from concrete.fhe.compilation.compiler import Compiler diff 
--git a/use_case_examples/cifar/cifar_brevitas_finetuning/cifar_utils.py b/use_case_examples/cifar/cifar_brevitas_finetuning/cifar_utils.py index 6136a0c22..56c2397c0 100644 --- a/use_case_examples/cifar/cifar_brevitas_finetuning/cifar_utils.py +++ b/use_case_examples/cifar/cifar_brevitas_finetuning/cifar_utils.py @@ -416,7 +416,6 @@ def torch_inference( device: str = "cpu", verbose: bool = False, ) -> float: - """Returns the `top_k` accuracy. Args: @@ -465,7 +464,6 @@ def fhe_compatibility(model: Callable, data: DataLoader) -> Callable: def mapping_keys(pre_trained_weights: Dict, model: nn.Module, device: str) -> nn.Module: - """ Initialize the quantized model with pre-trained fp32 weights. diff --git a/use_case_examples/cifar/cifar_brevitas_training/evaluate_torch_cml.py b/use_case_examples/cifar/cifar_brevitas_training/evaluate_torch_cml.py index dd5f158ac..d3e2e8b00 100644 --- a/use_case_examples/cifar/cifar_brevitas_training/evaluate_torch_cml.py +++ b/use_case_examples/cifar/cifar_brevitas_training/evaluate_torch_cml.py @@ -118,9 +118,11 @@ def main(args): input_set, n_bits={"model_inputs": 8, "model_outputs": 8}, configuration=cfg, - rounding_threshold_bits={"n_bits": rounding_threshold_bits, "method": "EXACT"} - if rounding_threshold_bits is not None - else None, + rounding_threshold_bits=( + {"n_bits": rounding_threshold_bits, "method": "EXACT"} + if rounding_threshold_bits is not None + else None + ), ) # Print max bit-width in the circuit diff --git a/use_case_examples/deployment/breast_cancer_builtin/client.py b/use_case_examples/deployment/breast_cancer_builtin/client.py index d8ad52bb9..fa2c79af5 100644 --- a/use_case_examples/deployment/breast_cancer_builtin/client.py +++ b/use_case_examples/deployment/breast_cancer_builtin/client.py @@ -8,6 +8,7 @@ - Collect the data and decrypt it - De-quantize the decrypted results """ + import io import os from pathlib import Path diff --git a/use_case_examples/deployment/cifar_8_bit/client.py 
b/use_case_examples/deployment/cifar_8_bit/client.py index 2b2cdbf51..a35d669bb 100644 --- a/use_case_examples/deployment/cifar_8_bit/client.py +++ b/use_case_examples/deployment/cifar_8_bit/client.py @@ -8,6 +8,7 @@ - Collect the data and decrypt it - De-quantize the decrypted results """ + import io import os import sys diff --git a/use_case_examples/deployment/cifar_8_bit/compile.py b/use_case_examples/deployment/cifar_8_bit/compile.py index f7b5eac83..dc7579d41 100644 --- a/use_case_examples/deployment/cifar_8_bit/compile.py +++ b/use_case_examples/deployment/cifar_8_bit/compile.py @@ -1,4 +1,5 @@ """Load torch model, compiles it to FHE and exports it""" + import sys import time from pathlib import Path diff --git a/use_case_examples/deployment/sentiment_analysis/train.py b/use_case_examples/deployment/sentiment_analysis/train.py index 02624fa84..d396001d3 100644 --- a/use_case_examples/deployment/sentiment_analysis/train.py +++ b/use_case_examples/deployment/sentiment_analysis/train.py @@ -1,4 +1,5 @@ """Copy-pasted from use_case_examples/sentiment_analysis_with_transformer""" + import os os.environ["TRANSFORMERS_CACHE"] = "./hf_cache" diff --git a/use_case_examples/hybrid_model/infer_hybrid_llm_generate.py b/use_case_examples/hybrid_model/infer_hybrid_llm_generate.py index f4a8d64d6..5591fa54e 100644 --- a/use_case_examples/hybrid_model/infer_hybrid_llm_generate.py +++ b/use_case_examples/hybrid_model/infer_hybrid_llm_generate.py @@ -1,4 +1,5 @@ """Showcase for the hybrid model converter.""" + import json import os import time