diff --git a/common/tvm_utility/data/user/yolo_v2_tiny/inference_engine_tvm_config.hpp b/common/tvm_utility/data/user/yolo_v2_tiny/inference_engine_tvm_config.hpp
index 451824007f49d..06b7d9689f5c4 100644
--- a/common/tvm_utility/data/user/yolo_v2_tiny/inference_engine_tvm_config.hpp
+++ b/common/tvm_utility/data/user/yolo_v2_tiny/inference_engine_tvm_config.hpp
@@ -14,8 +14,8 @@
 
 #include "tvm_utility/pipeline.hpp"
 
-#ifndef YOLO_V2_TINY_INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
-#define YOLO_V2_TINY_INFERENCE_ENGINE_TVM_CONFIG_HPP_
+#ifndef COMMON__TVM_UTILITY__DATA__USER__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
+#define COMMON__TVM_UTILITY__DATA__USER__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_
 
 namespace model_zoo
 {
@@ -28,30 +28,22 @@ namespace yolo_v2_tiny
 namespace tensorflow_fp32_coco
 {
 
-static const tvm_utility::pipeline::InferenceEngineTVMConfig config {
-  {
-    3,
-    0,
-    0
-  },  // modelzoo_version
+static const tvm_utility::pipeline::InferenceEngineTVMConfig config{
+  {3, 0, 0},  // modelzoo_version
 
   "yolo_v2_tiny",  // network_name
-  "llvm",  // network_backend
+  "llvm",          // network_backend
 
-  "deploy_lib.so",  //network_module_path
-  "deploy_graph.json",  // network_graph_path
+  "deploy_lib.so",        // network_module_path
+  "deploy_graph.json",    // network_graph_path
   "deploy_param.params",  // network_params_path
 
   kDLCPU,  // tvm_device_type
-  0,  // tvm_device_id
+  0,       // tvm_device_id
 
-  {
-    {"input", kDLFloat, 32, 1, {-1, 416, 416, 3}}
-  },  // network_inputs
+  {{"input", kDLFloat, 32, 1, {-1, 416, 416, 3}}},  // network_inputs
 
-  {
-    {"output", kDLFloat, 32, 1, {1, 13, 13, 425}}
-  }  // network_outputs
+  {{"output", kDLFloat, 32, 1, {1, 13, 13, 425}}}  // network_outputs
 };
 
 }  // namespace tensorflow_fp32_coco
@@ -59,4 +51,5 @@ static const tvm_utility::pipeline::InferenceEngineTVMConfig config {
 }  // namespace camera_obstacle_detection
 }  // namespace perception
 }  // namespace model_zoo
-#endif  // YOLO_V2_TINY_INFERENCE_ENGINE_TVM_CONFIG_HPP_  // NOLINT
+#endif  // COMMON__TVM_UTILITY__DATA__USER__YOLO_V2_TINY__INFERENCE_ENGINE_TVM_CONFIG_HPP_  //
+        // NOLINT
diff --git a/common/tvm_utility/include/tvm_utility/pipeline.hpp b/common/tvm_utility/include/tvm_utility/pipeline.hpp
index d67921c3bcbf0..74bf3d57cf09c 100644
--- a/common/tvm_utility/include/tvm_utility/pipeline.hpp
+++ b/common/tvm_utility/include/tvm_utility/pipeline.hpp
@@ -225,14 +225,13 @@ class InferenceEngineTVM : public InferenceEngine
 {
 public:
   explicit InferenceEngineTVM(
-    const InferenceEngineTVMConfig & config,
-    const std::string & pkg_name,
+    const InferenceEngineTVMConfig & config, const std::string & pkg_name,
     const std::string & autoware_data_path)
   : config_(config)
   {
     // Get full network path
-    std::string network_prefix = autoware_data_path + pkg_name +
-      "/models/" + config.network_name + "/";
+    std::string network_prefix =
+      autoware_data_path + pkg_name + "/models/" + config.network_name + "/";
     std::string network_module_path = network_prefix + config.network_module_path;
     std::string network_graph_path = network_prefix + config.network_graph_path;
     std::string network_params_path = network_prefix + config.network_params_path;
@@ -295,9 +294,7 @@ class InferenceEngineTVM : public InferenceEngine
     }
   }
 
-  explicit InferenceEngineTVM(
-    const InferenceEngineTVMConfig & config,
-    const std::string & pkg_name)
+  explicit InferenceEngineTVM(const InferenceEngineTVMConfig & config, const std::string & pkg_name)
   : config_(config)
   {
     // Get full network path