Skip to content

Commit

Permalink
chore: update Concrete Python to 2.5.1
Browse files Browse the repository at this point in the history
- update Concrete Python to 2.5.1
- update mypy and solve some type hint issues
  • Loading branch information
fd0r committed Feb 12, 2024
1 parent 19ba423 commit db6d7b3
Show file tree
Hide file tree
Showing 12 changed files with 230 additions and 193 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ OPEN_PR="true"
# Force the installation of a Concrete Python version, which is very useful with nightly versions
# /!\ WARNING /!\: This version should NEVER be a wildcard as it might create some
# issues when trying to run it in the future.
CONCRETE_PYTHON_VERSION="concrete-python==2.5.0"
CONCRETE_PYTHON_VERSION="concrete-python==2.5.1"

# Force the installation of Concrete Python's latest version, release-candidates included
# CONCRETE_PYTHON_VERSION="$$(poetry run python \
Expand Down
10 changes: 5 additions & 5 deletions deps_licenses/licenses_linux_user.txt
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
Name, Version, License
GitPython, 3.1.41, BSD License
PyYAML, 6.0.1, MIT License
anyio, 4.2.0, MIT License
boto3, 1.34.37, Apache Software License
botocore, 1.34.37, Apache Software License
anyio, 3.7.1, MIT License
boto3, 1.34.38, Apache Software License
botocore, 1.34.38, Apache Software License
brevitas, 0.8.0, UNKNOWN
certifi, 2023.7.22, Mozilla Public License 2.0 (MPL 2.0)
charset-normalizer, 3.3.2, MIT License
click, 8.1.7, BSD License
coloredlogs, 15.0.1, MIT License
concrete-python, 2.5, BSD-3-Clause
concrete-python, 2.5.1, BSD-3-Clause
dependencies, 2.0.1, BSD License
dill, 0.3.8, BSD License
exceptiongroup, 1.2.0, MIT License
fastapi, 0.102.0, MIT License
fastapi, 0.103.2, MIT License
filelock, 3.13.1, The Unlicense (Unlicense)
flatbuffers, 23.5.26, Apache Software License
fsspec, 2024.2.0, BSD License
Expand Down
2 changes: 1 addition & 1 deletion deps_licenses/licenses_linux_user.txt.md5
Original file line number Diff line number Diff line change
@@ -1 +1 @@
f709427468f5be4ee6836603495fea72
a923947bfb17b658ab8efe61d5cafe96
10 changes: 6 additions & 4 deletions deps_licenses/licenses_mac_intel_user.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
Name, Version, License
GitPython, 3.1.41, BSD License
PyYAML, 6.0.1, MIT License
annotated-types, 0.6.0, MIT License
anyio, 4.2.0, MIT License
boto3, 1.34.37, Apache Software License
botocore, 1.34.37, Apache Software License
Expand All @@ -9,11 +10,11 @@ certifi, 2023.7.22, Mozilla Public License 2.0 (MPL 2.0)
charset-normalizer, 3.3.2, MIT License
click, 8.1.7, BSD License
coloredlogs, 15.0.1, MIT License
concrete-python, 2.5, BSD-3-Clause
concrete-python, 2.5.1, BSD-3-Clause
dependencies, 2.0.1, BSD License
dill, 0.3.8, BSD License
exceptiongroup, 1.2.0, MIT License
fastapi, 0.102.0, MIT License
fastapi, 0.109.2, MIT License
filelock, 3.13.1, The Unlicense (Unlicense)
flatbuffers, 23.5.26, Apache Software License
fsspec, 2024.2.0, BSD License
Expand All @@ -39,7 +40,8 @@ packaging, 23.2, Apache Software License; BSD License
pluggy, 1.4.0, MIT License
protobuf, 3.20.3, BSD-3-Clause
psutil, 5.9.8, BSD License
pydantic, 1.10.14, MIT License
pydantic, 2.6.1, MIT License
pydantic_core, 2.16.2, MIT License
pytest, 7.4.1, MIT License
pytest-json-report, 1.5.0, MIT
pytest-metadata, 3.1.0, Mozilla Public License 2.0 (MPL 2.0)
Expand All @@ -56,7 +58,7 @@ skops, 0.5.0, MIT
skorch, 0.11.0, new BSD 3-Clause
smmap, 5.0.1, BSD License
sniffio, 1.3.0, Apache Software License; MIT License
starlette, 0.27.0, BSD License
starlette, 0.36.3, BSD License
sympy, 1.12, BSD License
tabulate, 0.8.10, MIT License
threadpoolctl, 3.2.0, BSD License
Expand Down
13 changes: 7 additions & 6 deletions deps_licenses/licenses_mac_silicon_user.txt
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
Name, Version, License
GitPython, 3.1.41, BSD License
PyYAML, 6.0.1, MIT License
anyio, 4.2.0, MIT License
boto3, 1.34.37, Apache Software License
botocore, 1.34.37, Apache Software License
anyio, 3.7.1, MIT License
boto3, 1.34.38, Apache Software License
botocore, 1.34.38, Apache Software License
brevitas, 0.8.0, UNKNOWN
certifi, 2023.7.22, Mozilla Public License 2.0 (MPL 2.0)
charset-normalizer, 3.3.2, MIT License
click, 8.1.7, BSD License
coloredlogs, 15.0.1, MIT License
concrete-python, 2.5, BSD-3-Clause
concrete-python, 2.5.1, BSD-3-Clause
dependencies, 2.0.1, BSD License
dill, 0.3.8, BSD License
exceptiongroup, 1.2.0, MIT License
fastapi, 0.102.0, MIT License
fastapi, 0.103.2, MIT License
filelock, 3.13.1, The Unlicense (Unlicense)
flatbuffers, 23.5.26, Apache Software License
fsspec, 2024.2.0, BSD License
Expand Down Expand Up @@ -66,7 +66,8 @@ torch, 1.13.1, BSD License
tqdm, 4.66.1, MIT License; Mozilla Public License 2.0 (MPL 2.0)
transformers, 4.37.2, Apache Software License
typing_extensions, 4.5.0, Python Software Foundation License
urllib3, 2.0.7, MIT License
urllib3, 1.26.18, MIT License
uvicorn, 0.21.1, BSD License
xgboost, 1.6.2, Apache Software License
z3-solver, 4.12.5.0, MIT License
zipp, 3.17.0, MIT License
2 changes: 1 addition & 1 deletion deps_licenses/licenses_mac_silicon_user.txt.md5
Original file line number Diff line number Diff line change
@@ -1 +1 @@
f709427468f5be4ee6836603495fea72
a923947bfb17b658ab8efe61d5cafe96
248 changes: 122 additions & 126 deletions poetry.lock

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ readme = "README.md"
# Investigate if it is better to fix specific versions or use lower and upper bounds
# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2665
python = ">=3.8.1,<3.11"
concrete-python = "2.5.0"
concrete-python = "2.5.1"
setuptools = "65.6.3"
skops = {version = "0.5.0"}
xgboost = "1.6.2"
Expand All @@ -53,7 +53,7 @@ protobuf = "3.20.3"

# Deployment
boto3 = "^1.23.5"
fastapi = "^0.102.0"
fastapi = "^0.103.2"
uvicorn = "^0.21.0"
tqdm = "^4.64.1"
transformers = "^4.36.0"
Expand All @@ -70,7 +70,7 @@ pylint = "^2.13.0"
pytest = "7.4.1"
pytest-cov = "^4.1.0"
pytest_codeblocks = "^0.14.0"
mypy = "^0.991"
mypy = "^1.8.0"
pydocstyle = "^6.1.1"
python-semantic-release = "^7.27.0"
semver = "^2.13.0"
Expand Down Expand Up @@ -113,9 +113,9 @@ flake8-bugbear = "23.2.13"
flake8 = "6.0.0"
sphinx = "6.1.3"
# The following was inspired from https://github.com/python-poetry/poetry/issues/8271
tensorflow = {version = "2.13.0"}
tensorflow = {version = "2.13.1"}
tensorflow-macos = {version = "2.13.0", platform = "darwin", markers = "platform_machine=='arm64'" }
tensorflow-metal = {version = "0.8.0", markers="sys_platform == 'darwin' and platform_machine == 'arm'"}
tensorflow-metal = {version = "1.1.0", markers="sys_platform == 'darwin' and platform_machine == 'arm'"}
tensorflow-io-gcs-filesystem = [
{version = "0.34.0", markers = "platform_machine!='arm64' or platform_system!='Darwin'" },
]
Expand Down
12 changes: 10 additions & 2 deletions src/concrete/ml/quantization/base_quantized_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,14 @@
UniformQuantizationParameters,
)

ONNXOpInputOutputType = Union[numpy.ndarray, QuantizedArray, None]
ONNXOpInputOutputType = Union[
numpy.ndarray,
QuantizedArray,
None,
bool,
int,
float,
]

ALL_QUANTIZED_OPS: Set[Type] = set()

Expand Down Expand Up @@ -969,7 +976,8 @@ def cnp_round(

# Update the lsbs_to_remove value in the dictionary
self.lsbs_to_remove[rounding_operation_id] = max(
self.lsbs_to_remove.get(rounding_operation_id, 0), computed_lsbs_to_remove
self.lsbs_to_remove.get(rounding_operation_id, 0),
computed_lsbs_to_remove,
)

# Rounding logic
Expand Down
32 changes: 20 additions & 12 deletions src/concrete/ml/quantization/post_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

import numpy
import onnx
from concrete.fhe.tracing import Tracer
from onnx import numpy_helper

from ..common.debugging import assert_true
Expand Down Expand Up @@ -400,7 +401,9 @@ def _calibrate_layers_activation(
)

@abstractmethod
def _process_initializer(self, n_bits: int, values: numpy.ndarray) -> QuantizedArray:
def _process_initializer(
self, n_bits: int, values: Union[numpy.ndarray, float, int, bool]
) -> Union[QuantizedArray, RawOpOutput]:
"""Transform a constant tensor according to the model conversion mode.
The values supplied are floating point values that will be quantized.
Expand All @@ -410,7 +413,8 @@ def _process_initializer(self, n_bits: int, values: numpy.ndarray) -> QuantizedA
values (Union[numpy.ndarray, float, int, bool]): Float values that initialize this tensor
Returns:
QuantizedArray: a quantized tensor with integer values on n_bits bits
Union[QuantizedArray, RawOpOutput]: a quantized tensor with integer
values on n_bits bits
"""

@abstractmethod
Expand Down Expand Up @@ -535,14 +539,13 @@ def _quantize_layers(self, *input_calibration_data: numpy.ndarray):
curr_cst_inputs[input_idx] = value
else:
# Initializers are ndarray or scalar
assert value is not None
assert isinstance(value, numpy.ndarray) or numpy.isscalar(value)
assert isinstance(value, (numpy.ndarray, float, int, bool))
curr_cst_inputs[input_idx] = self._process_initializer(
self.n_bits_op_weights, value
)
else:
# Initializers are ndarray or scalar
assert isinstance(value, numpy.ndarray) or numpy.isscalar(value)
assert isinstance(value, (numpy.ndarray, float, int, bool))
curr_cst_inputs[input_idx] = value

has_variable_inputs = (len(curr_inputs) - len(curr_cst_inputs)) > 0
Expand Down Expand Up @@ -865,7 +868,9 @@ def _process_layer(
True, quantized_op, *calibration_data, quantizers=quantizers
)

def _process_initializer(self, n_bits: int, values: numpy.ndarray):
def _process_initializer(
self, n_bits: int, values: Union[numpy.ndarray, float, int, bool]
) -> Union[QuantizedArray, RawOpOutput]:
"""Quantize a network constant tensor (a weights tensor).
The values supplied are floating point values that will be quantized.
Expand All @@ -875,13 +880,14 @@ def _process_initializer(self, n_bits: int, values: numpy.ndarray):
values (Union[numpy.ndarray, float, int, bool]): Float values that initialize this tensor
Returns:
QuantizedArray: a quantized tensor with integer values on n_bits bits
Union[QuantizedArray, RawOpOutput]: a quantized tensor with integer
values on n_bits bits
"""

if isinstance(values, numpy.ndarray) and numpy.issubdtype(values.dtype, numpy.integer):
return values.view(RawOpOutput)

assert isinstance(values, (numpy.ndarray, float))
if not isinstance(values, (numpy.ndarray, Tracer)):
values = numpy.array(values)
is_signed = is_symmetric = self._check_distribution_is_symmetric_around_zero(values)

return QuantizedArray(
Expand Down Expand Up @@ -990,7 +996,9 @@ def _process_layer(
False, quantized_op, *calibration_data, quantizers=quantizers
)

def _process_initializer(self, n_bits: int, values: numpy.ndarray):
def _process_initializer(
self, n_bits: int, values: Union[numpy.ndarray, float, int, bool]
) -> Union[QuantizedArray, RawOpOutput]:
"""Process an already quantized weight tensor.
The values supplied are in floating point, but are discrete, in the sense
Expand All @@ -1003,8 +1011,8 @@ def _process_initializer(self, n_bits: int, values: numpy.ndarray):
values (Union[numpy.ndarray, float, int, bool]): Discrete float values that initialize this tensor
Returns:
QuantizedArray: a quantized tensor with integer values on n_bits bits and with alpha as
the scaling factor.
Union[QuantizedArray, RawOpOutput]: a quantized tensor with integer values on
n_bits bits and with alpha as the scaling factor.
"""

# Assume that integer initializer op inputs are raw values that should not be quantized
Expand Down
Loading

0 comments on commit db6d7b3

Please sign in to comment.