Commit ed9ed99

Merge pull request #1054 from yeliang2258/fix_tensorrt_quantize_dev

Fix tensorrt quantize bugs

yeliang2258 committed Mar 15, 2023
2 parents: 0af1092 + 7918064
Showing 1 changed file with 9 additions and 4 deletions.
paddle2onnx/converter.cc (13 changes: 9 additions & 4 deletions; file mode 100755 → 100644)
@@ -67,7 +67,8 @@ PADDLE2ONNX_DECL bool IsExportable(const char* model, const char* params,
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -120,7 +121,8 @@ PADDLE2ONNX_DECL bool IsExportable(const void* model_buffer, int model_size,
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -166,7 +168,8 @@ PADDLE2ONNX_DECL bool Export(
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
@@ -220,7 +223,9 @@ PADDLE2ONNX_DECL bool Export(
     P2OLogger(verbose) << "The exported ONNX model is invalid!" << std::endl;
     return false;
   }
-  if (deploy_backend == "tensorrt" && calibration_str.empty()) {
+
+  if (parser.is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
+      calibration_str.empty()) {
     P2OLogger(verbose) << "Can not generate calibration cache for TensorRT "
                           "deploy backend when export quantize model."
                        << std::endl;
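Note on the fix: each hunk tightens the same condition in two ways. First, the calibration-cache check now runs only when parser.is_quantized_model is set, so non-quantized exports no longer fail here. Second, the backend name is compared through std::string rather than directly; if deploy_backend is a const char*, as the surrounding C-style signatures suggest, the old expression deploy_backend == "tensorrt" compares pointer addresses rather than characters and can be false even when the text matches. The sketch below illustrates that pitfall in isolation; it is a minimal standalone example, and NeedsCalibrationCache is a hypothetical helper mirroring the patched condition, not part of paddle2onnx.

#include <iostream>
#include <string>

// Hypothetical helper mirroring the fixed condition from the diff above.
bool NeedsCalibrationCache(bool is_quantized_model, const char* deploy_backend,
                           const std::string& calibration_str) {
  // Wrapping the C string in std::string forces a character-by-character
  // comparison of the backend name instead of a pointer comparison.
  return is_quantized_model && "tensorrt" == std::string(deploy_backend) &&
         calibration_str.empty();
}

int main() {
  std::string backend = "tensorrt";  // runtime string with its own storage
  const char* p = backend.c_str();

  // Pointer comparison: compares addresses, typically false here; most
  // compilers warn that comparing against a string literal is unreliable.
  std::cout << (p == "tensorrt") << "\n";                   // likely 0

  // Value comparison, as in the patched check: true whenever the text matches
  // and the model is quantized with no calibration data supplied.
  std::cout << NeedsCalibrationCache(true, p, std::string{}) << "\n";  // 1
  return 0;
}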
