From 5e6aecb8220e56ca52995a9b4451cffd43ed91f5 Mon Sep 17 00:00:00 2001
From: pinto0309
Date: Sat, 19 Oct 2024 14:04:05 +0900
Subject: [PATCH 1/2] Supports multi-batch quantization of image input

---
 README.md           |  4 ++--
 onnx2tf/__init__.py |  2 +-
 onnx2tf/onnx2tf.py  | 12 ++++++++----
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index cae491d4..94d4e0ee 100644
--- a/README.md
+++ b/README.md
@@ -299,7 +299,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  ghcr.io/pinto0309/onnx2tf:1.26.1
+  ghcr.io/pinto0309/onnx2tf:1.26.2
 
   or
 
@@ -307,7 +307,7 @@ Video speed is adjusted approximately 50 times slower than actual speed.
   docker run --rm -it \
   -v `pwd`:/workdir \
   -w /workdir \
-  docker.io/pinto0309/onnx2tf:1.26.1
+  docker.io/pinto0309/onnx2tf:1.26.2
 
   or
 
diff --git a/onnx2tf/__init__.py b/onnx2tf/__init__.py
index 6fbb5f6a..598f1226 100644
--- a/onnx2tf/__init__.py
+++ b/onnx2tf/__init__.py
@@ -1,3 +1,3 @@
 from onnx2tf.onnx2tf import convert, main
 
-__version__ = '1.26.1'
+__version__ = '1.26.2'
diff --git a/onnx2tf/onnx2tf.py b/onnx2tf/onnx2tf.py
index 5e9014e4..b77e8990 100644
--- a/onnx2tf/onnx2tf.py
+++ b/onnx2tf/onnx2tf.py
@@ -1626,8 +1626,9 @@ def sanitizing(node):
                     mean,
                     std,
                 ]
+
         elif custom_input_op_name_np_data_path is not None:
-            for param in custom_input_op_name_np_data_path:
+            for param, model_input in zip(custom_input_op_name_np_data_path, model.inputs):
                 if len(param) != 4:
                     error(
                         "If you want to use custom input with the '-oiqt' option, " +
@@ -1652,11 +1653,14 @@ def sanitizing(node):
 
         # representative_dataset_gen
         def representative_dataset_gen():
-            for idx in range(data_count):
+            batch_size = model.inputs[0].shape[0]
+            if not isinstance(batch_size, int):
+                batch_size = 1
+            for idx in range(0, data_count, batch_size):
                 yield_data_dict = {}
                 for model_input_name in model_input_name_list:
                     calib_data, mean, std = calib_data_dict[model_input_name]
-                    normalized_calib_data: np.ndarray = (calib_data[idx] - mean) / std
+                    normalized_calib_data: np.ndarray = (calib_data[idx:idx+batch_size] - mean) / std
                     yield_data_dict[model_input_name] = tf.cast(tf.convert_to_tensor(normalized_calib_data), tf.float32)
                 yield yield_data_dict
 
@@ -1708,7 +1712,7 @@ def representative_dataset_gen():
             inf_type_input = tf.float32
         else:
             inf_type_input = tf.int8
-
+
         if output_quant_dtype == 'int8':
             inf_type_output = tf.int8
         elif output_quant_dtype == 'uint8':

From b10e8c6feb5e20425bab6309dc666382ee6af003 Mon Sep 17 00:00:00 2001
From: pinto0309
Date: Sat, 19 Oct 2024 14:07:43 +0900
Subject: [PATCH 2/2] update

---
 onnx2tf/onnx2tf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/onnx2tf/onnx2tf.py b/onnx2tf/onnx2tf.py
index b77e8990..b58d570e 100644
--- a/onnx2tf/onnx2tf.py
+++ b/onnx2tf/onnx2tf.py
@@ -1628,7 +1628,7 @@ def sanitizing(node):
 
         elif custom_input_op_name_np_data_path is not None:
-            for param, model_input in zip(custom_input_op_name_np_data_path, model.inputs):
+            for param in custom_input_op_name_np_data_path:
                 if len(param) != 4:
                     error(
                         "If you want to use custom input with the '-oiqt' option, " +
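
Note on the representative_dataset_gen hunk: the minimal, self-contained Python sketch below is not part of the patch; it only illustrates the batching behaviour the change introduces. The input name 'input_1', the dummy calibration arrays, and fixed_batch_size are made-up stand-ins for model.inputs[0].shape[0] and the calibration data normally supplied via -cind. When the batch axis is dynamic (None), the generator falls back to one sample per calibration step, matching the isinstance check in the patch.

import numpy as np
import tensorflow as tf

# Dummy calibration data standing in for the .npy files passed via -cind:
# 8 RGB images already stacked along the batch axis, plus per-channel mean/std.
calib_data = np.random.rand(8, 224, 224, 3).astype(np.float32)
mean = np.asarray([[[[0.485, 0.456, 0.406]]]], dtype=np.float32)
std = np.asarray([[[[0.229, 0.224, 0.225]]]], dtype=np.float32)

data_count = calib_data.shape[0]
fixed_batch_size = 4  # stand-in for model.inputs[0].shape[0]

def representative_dataset_gen():
    # Fall back to single-sample calibration when the batch axis is dynamic.
    batch_size = fixed_batch_size if isinstance(fixed_batch_size, int) else 1
    for idx in range(0, data_count, batch_size):
        # Slice a whole batch instead of a single sample, then normalize it.
        normalized = (calib_data[idx:idx + batch_size] - mean) / std
        yield {'input_1': tf.cast(tf.convert_to_tensor(normalized), tf.float32)}

# Each yielded dict now carries batch_size samples per calibration step.
for batch in representative_dataset_gen():
    print(batch['input_1'].shape)  # (4, 224, 224, 3)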