From 79180648bed83bb0c598bb0a8f8b4af2521829b6 Mon Sep 17 00:00:00 2001
From: yeliang2258 <1047690002@qq.com>
Date: Tue, 14 Mar 2023 12:01:26 +0000
Subject: [PATCH] fix tensorrt quantize bugs

---
 paddle2onnx/converter.cc | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
 mode change 100755 => 100644 paddle2onnx/converter.cc

diff --git a/paddle2onnx/converter.cc b/paddle2onnx/converter.cc
old mode 100755
new mode 100644
index d24490cc0..53e5d4817
--- a/paddle2onnx/converter.cc
+++ b/paddle2onnx/converter.cc
@@ -67,7 +67,8 @@ PADDLE2ONNX_DECL bool IsExportable(const char* model, const char* params,
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -120,7 +121,8 @@ PADDLE2ONNX_DECL bool IsExportable(const void* model_buffer, int model_size,
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -166,7 +168,8 @@ PADDLE2ONNX_DECL bool Export(
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -220,7 +223,9 @@ PADDLE2ONNX_DECL bool Export(
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;